source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_binop__eq_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_uint16
// A.*B function (eWiseMult): GB_AemultB__eq_uint16
// A*D function (colscale): GB_AxD__eq_uint16
// D*A function (rowscale): GB_DxB__eq_uint16
// C+=B function (dense accum): GB_Cdense_accumB__eq_uint16
// C+=b function (dense accum): GB_Cdense_accumb__eq_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_uint16
// C=scalar+B GB_bind1st__eq_uint16
// C=scalar+B' GB_bind1st_tran__eq_uint16
// C=A+scalar GB_bind2nd__eq_uint16
// C=A'+scalar GB_bind2nd_tran__eq_uint16
// C type: bool
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_UINT16 || GxB_NO_EQ_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; the "add" operator for this kernel
// is EQ, so each entry is cij = (aij == bij) (see GB_BINOP above).
GrB_Info GB_Cdense_ewise3_noaccum__eq_uint16
(
GrB_Matrix C,           // output, dense, type bool
const GrB_Matrix A,     // input, dense, type uint16_t
const GrB_Matrix B,     // input, dense, type uint16_t
const int nthreads      // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time (GxB_NO_* flags): caller falls back to generic kernel
return (GrB_NO_VALUE) ;
#else
// the actual loop nest lives in the shared template, driven by the GB_* macros above
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the slice arrays partition the
// entries of B across tasks for parallel accumulation.
GrB_Info GB_Cdense_accumB__eq_uint16
(
GrB_Matrix C,                               // input/output, dense, type bool
const GrB_Matrix B,                         // sparse input, type uint16_t
const int64_t *GB_RESTRICT kfirst_slice,    // first vector of B handled by each task
const int64_t *GB_RESTRICT klast_slice,     // last vector of B handled by each task
const int64_t *GB_RESTRICT pstart_slice,    // start of entries for each task
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// #if 0: the generator disabled this kernel body for the EQ operator,
// so this function is a no-op stub that reports success
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed as an untyped pointer).
GrB_Info GB_Cdense_accumb__eq_uint16
(
GrB_Matrix C,               // input/output, dense, type bool
const GB_void *p_bwork,     // pointer to the scalar b, of type uint16_t
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// #if 0: kernel body disabled by the generator for the EQ operator; stub only
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D,
// applying cij = (aij == djj) per the EQ operator.
GrB_Info GB_AxD__eq_uint16
(
GrB_Matrix C,                               // output, type bool
const GrB_Matrix A, bool A_is_pattern,      // true if only the pattern of A is used
const GrB_Matrix D, bool D_is_pattern,      // diagonal scaling matrix; pattern-only flag
const int64_t *GB_RESTRICT kfirst_slice,    // ek_slice task partition of A
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's numeric values; the loop itself is in the template
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D,
// applying cij = (dii == bij) per the EQ operator.
GrB_Info GB_DxB__eq_uint16
(
GrB_Matrix C,                               // output, type bool
const GrB_Matrix D, bool D_is_pattern,      // diagonal scaling matrix; pattern-only flag
const GrB_Matrix B, bool B_is_pattern,      // true if only the pattern of B is used
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's numeric values; the loop itself is in the template
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B on the union of the patterns of A and B,
// with cij = (aij == bij) where both entries are present.
GrB_Info GB_AaddB__eq_uint16
(
GrB_Matrix C,                               // output, type bool
const int C_sparsity,                       // sparsity structure chosen for C
const GrB_Matrix M,                         // optional mask (may be NULL)
const bool Mask_struct,                     // use the structure of M, not its values
const bool Mask_comp,                       // complement the mask
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,                        // per name: C and M share the same hyperlist
const int64_t *GB_RESTRICT C_to_M,          // vector mappings from C to M/A/B
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList, // parallel task partition
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspace; allocated inside the template, released by GB_FREE_ALL below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B on the intersection of the patterns of
// A and B, with cij = (aij == bij).
GrB_Info GB_AemultB__eq_uint16
(
GrB_Matrix C,                               // output, type bool
const int C_sparsity,                       // sparsity structure chosen for C
const GrB_Matrix M,                         // optional mask (may be NULL)
const bool Mask_struct,                     // use the structure of M, not its values
const bool Mask_comp,                       // complement the mask
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,          // vector mappings from C to M/A/B
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList, // parallel task partition
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspace; allocated inside the template, released by GB_FREE_ALL below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every entry present in B: apply the EQ operator
// with the scalar bound to the first argument.
GrB_Info GB_bind1st__eq_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped scalar and arrays
bool *out = (bool *) Cx_output ;
const uint16_t scalar = (*((const uint16_t *) x_input)) ;
const uint16_t *vals = (const uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // only touch positions present in the bitmap/full pattern of B
    if (GBB (Bb, p))
    {
        out [p] = (scalar == vals [p]) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every entry present in A: apply the EQ operator
// with the scalar bound to the second argument.
GrB_Info GB_bind2nd__eq_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays and scalar
bool *out = (bool *) Cx_output ;
const uint16_t *vals = (const uint16_t *) Ax_input ;
const uint16_t scalar = (*((const uint16_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // only touch positions present in the bitmap/full pattern of A
    if (GBB (Ab, p))
    {
        out [p] = (vals [p] == scalar) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x == aij),
// using the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__eq_uint16
(
GrB_Matrix C,                       // output, type bool
const GB_void *x_input,             // scalar x, of type uint16_t
const GrB_Matrix A,                 // input matrix to transpose
int64_t *GB_RESTRICT *Workspaces,   // per-thread workspaces for the transpose
const int64_t *GB_RESTRICT A_slice, // task partition of A
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A while applying cij = (aij == y),
// using the GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__eq_uint16
(
GrB_Matrix C,                       // output, type bool
const GrB_Matrix A,                 // input matrix to transpose
const GB_void *y_input,             // scalar y, of type uint16_t
int64_t *GB_RESTRICT *Workspaces,   // per-thread workspaces for the transpose
const int64_t *GB_RESTRICT A_slice, // task partition of A
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y (the classic GNU libc manual recipe).
 * NOTE: *y is modified during carry normalization, as in the original.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize so that x->tv_usec >= y->tv_usec by borrowing seconds. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Fold any excess whole seconds hiding in the microsecond field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the time-tiled order-4 (25-point) 3D stencil with variable
 * coefficients: parses grid sizes, allocates the double-buffered field A and
 * 13 coefficient arrays, runs TESTS timed repetitions of the PLUTO/CLooG
 * generated loop nest, and reports the best time. */
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
/* NOTE(review): Nx/Ny/Nz remain uninitialized when argc <= 3, and Nt when
 * argc <= 4; all later loops then read indeterminate values (undefined
 * behavior). Intended usage appears to be: prog Nx Ny Nz Nt — confirm. */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
/* A[2][Nz][Ny][Nx]: two time buffers, swapped via the t5 % 2 index below */
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
/* coef[13][Nz][Ny][Nx]: one field per axis-symmetric stencil coefficient */
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
/* tile extents baked into the generated loop nest; -1 terminates the list */
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
/* fixed seed so runs are reproducible */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Each repetition is timed independently; the minimum over TESTS runs is kept. */
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Auto-generated time-tiled loop nest: t1..t4 enumerate tiles, t5..t8
 * enumerate points inside a tile; the bounds implement the time-skewed
 * tiling. Do not hand-edit — regenerate from the PLUTO source instead. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) {
for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-499,512)),ceild(8*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(8*t1+Nx+7,512)),floord(16*t2+Nx+3,512)),floord(8*t3+Nx-5,512)),floord(16*t1-16*t2+Nz+Nx+5,512));t4++) {
for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),128*t4+126);t5++) {
for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(512*t4,4*t5+4);
ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
requires_reverse_offload.c | /// Based on OpenMP Spec 5.0 example: Example_target_reverse_offload.7.c
///
/// Expected failure until reverse_offload is supported:
/// --------> output: lld: error: undefined symbol: error_handler
///
#include <stdio.h>
#include <omp.h>
#define N 100
#pragma omp requires reverse_offload
/* Host-only error reporter, called from the target region via reverse
 * offload (see the declare target device_type(host) directive below). */
void error_handler(int wrong_value, int index)
{
    printf(" Error in offload: A[%d]=%d\n", index, wrong_value);
    /* second line has no format specifiers, so a plain write suffices */
    fputs(" Expecting: A[i ]=i\n", stdout);
    // output: Error in offload: A[99]=-1
    // Expecting: A[i ]=i
}
// Ensure that error_handler is compiled for host only
#pragma omp declare target device_type(host) to(error_handler)
/* Initialize A[0..N-1] = i, deliberately corrupt the last entry, then check
 * the array inside a target region; on the (single) mismatch, reverse-offload
 * error_handler back to the host. */
int main()
{
int A[N];
for (int i=0; i<N; i++) A[i] = i;
A[N-1]=-1;   // planted error so the device-side check fires exactly once
#pragma omp target map(A)
{
for (int i=0; i<N; i++)
{
if (A[i] != i)
{
/* device(ancestor: 1) runs the call on the host (reverse offload);
 * the offending element is copied to the host with map(always,to:) */
#pragma omp target device(ancestor: 1) map(always,to: A[i:1])
error_handler(A[i], i);
}
}
}
return 0;
}
|
sum_over_primes.c | #include "sum_over_primes.h"
#include <math.h>
//
// Common internal routines, needed by the implementation
//
/* */
/* Primes Sieve */
/* */
// we will index primes starting with 1, as maths do
// element zero will be set to 0
// Sieve all primes up to N (inclusive). Returns the number of primes found;
// *out_prime receives a malloc'd, 1-indexed array (prime[1] = 2, prime[0] = 0)
// of length count+1 that the caller must free. Returns 0 (no allocation) for N < 2.
// NOTE(review): calloc/malloc results are not checked; very large N would
// dereference NULL on allocation failure — confirm acceptable for this tool.
static inline uint generate_prime_list_up_to(u64 N, u64** out_prime) {
if(N < 2) return 0;
// we do quite simple Sieve of Eratosthenes:
// we consider only odd numbers and use bitfield for "is 2i+1 composite"
const u64 SBS = 8*sizeof(u64); // sieve bucket size (in bits)
u64* is_composite = calloc(N/(2*SBS) + 1, sizeof(u64));
#define BIT(i) (UINT64_C(1) << (i))
is_composite[0] = 1; // 1 is not prime
// i, ii - are indices in the sieve; j,jj are the corresponding odd numbers (2*i+1 and 2*ii+1)
for(u64 i = 1, j = 3 ; j*j <= N ; ++i, j += 2) {
const u64 ib = i / SBS, ie = i - ib*SBS; // bucket index and in-bucket bit index
if(is_composite[ib] & BIT(ie)) continue; // known composite - all multiples are marked already
// all composite numbers below j^2 have at least one multiple less than j - they were already marked
for(u64 jj = j*j, ii = (jj-1)/2 ; jj <= N ; ii += j, jj += 2*j) {
const u64 iib = ii / SBS, iie = ii - iib*SBS; // bucket index and in-bucket bit index
is_composite[iib] |= BIT(iie); // mark composite
}
}
// first pass: count the primes so the output array can be sized exactly
uint count = 1; // include 2 immediately
for(u64 i = 0, in = (N-1)/2 ; i <= in ; ++i) {
const u64 ib = i / SBS, ie = i - ib*SBS; // bucket index and in-bucket bit index
if((is_composite[ib] & BIT(ie)) == 0) ++count; // prime
}
// second pass: collect the primes into the 1-indexed output array
u64* prime = malloc(sizeof(u64[count+1]));
prime[0] = 0; prime[1] = 2; // we sieve only odd numbers so treat 2 separately
for(u64 i = 0, in = (N-1)/2, pi = 2 ; i <= in ; ++i) {
const u64 ib = i / SBS, ie = i - ib*SBS; // bucket index and in-bucket bit index
if((is_composite[ib] & BIT(ie)) == 0) { // prime
assert(pi <= count); prime[pi++] = 2*i+1;
}
}
#undef BIT
free(is_composite);
*out_prime = prime;
return count;
}
/* Mark every multiple of prime[p_start..p_end-1] inside the window
 * [start, start+N) by setting is_composite[multiple - start] = true. */
static inline void mark_composite_with_primes(u64 start, uint N, bool is_composite[static N],
uint p_start, uint p_end, const u64 prime[static p_end]
) {
    const u64 limit = start + N;
    for(uint idx = p_start ; idx < p_end ; ++idx) {
        const u64 p = prime[idx];
        /* smallest multiple of p that is >= start */
        u64 multiple = p * ((start + p - 1) / p);
        while(multiple < limit) {
            is_composite[multiple - start] = true;
            multiple += p;
        }
    }
}
/* */
/* Binary Search */
/* */
/* search sorted (ascending) array for lowest index i, such that v <= a[i] */
/* this index can be used for insertion (to keep array sorted) */
/* comparison function acts like < (less) and is expected to be CMP(T,VT) */
/* i.e. if element is less than key */
#define IMPL_LOWER_BOUND_VTYPE(T, VT, NAME, CMP) \
static inline uint NAME(uint n, const T elem[const static n], VT v) { \
uint low = 0, high = n; /* start with whole array */ \
while(high > low) { /* more than one candidate */ \
const uint mid = low + (high - low) / 2;/* pick middle */ \
if(CMP(elem[mid], v)) low = mid + 1; /* a[mid] < v: no need to consider mid */ \
else high = mid; /* a[mid] >= v: pick first half */ \
} \
return low; \
}
#define IMPL_LOWER_BOUND(T, NAME, CMP) IMPL_LOWER_BOUND_VTYPE(T,T,NAME,CMP)
#define __LOWER_BOUND_IMPL_LESS_NUM(a,b) ((a)<(b))
IMPL_LOWER_BOUND_VTYPE(u64,u64,lower_bound_u64,__LOWER_BOUND_IMPL_LESS_NUM)
/* */
/* prime counting function for known primes */
/* */
/* pi(x) for x <= the largest sieved prime: count of primes <= x,
 * using binary search over the 1-indexed prime[] array. */
static inline uint primepi_known(u64 x, uint prime_count, const u64 prime[prime_count+1]) {
    /* position where x would be inserted to keep prime[] sorted */
    const uint idx = lower_bound_u64(prime_count+1, prime, x);
    if(idx <= prime_count && prime[idx] == x) return idx; /* x itself is prime */
    return idx - 1; /* otherwise, number of primes strictly below x */
}
/* */
/* SQRT */
/* */
/* Integer square root: largest r with r*r <= n, built bit by bit
 * from the most significant candidate bit downwards. */
static inline u64 usqrt_64(u64 n) {
    u64 bit = UINT64_C(1) << 31; /* highest bit a 64-bit square root can have */
    while(bit > n) bit >>= 1;
    u64 root = 0;
    for( ; bit != 0 ; bit >>= 1) {
        const u64 candidate = root | bit;
        if(candidate * candidate <= n) root = candidate; /* keep the bit if still <= n */
    }
    return root;
}
/* */
/* CBRT (Cubic root) */
/* */
// Integer cube root: largest y with y^3 <= n, built 1 bit of y per iteration
// while consuming 3 bits of n (s steps down by 3 from the top of the word).
static inline u64 ucbrt_64(u64 n) {
// modified code from hacker's delight, applying 64-bit specific fixes
u64 y = 0;
for(int s = 63 ; s >= 0 ; s -= 3) {
y = 2*y;
// b = (2y+1)^3 - (2y)^3 = 12y^2 + 6y + 1 scaled: cost of setting the next bit
u64 b = 3*y*(y+1) + 1;
if((n >> s) >= b) {
n -= (b << s); ++y; // accept the bit and subtract its cube contribution
}
}
return y;
}
/* */
/* min/max */
/* */
/* Branching min/max helpers for the two unsigned widths used in this file. */
static inline u64 min_u64(u64 a, u64 b) { if(a < b) return a; return b; }
static inline u64 max_u64(u64 a, u64 b) { if(a > b) return a; return b; }
static inline uint min_u(uint a, uint b) { if(a < b) return a; return b; }
static inline uint max_u(uint a, uint b) { if(a > b) return a; return b; }
/* */
/* alpha param */
/* */
/* Heuristic alpha parameter: an empirical cubic in log(X),
 * clamped to the range [1, X^(1/6)]. */
static inline double get_default_alpha(u64 X) {
    const u64 X_6 = usqrt_64(ucbrt_64(X)); /* X^(1/6) = sqrt(cbrt(X)) */
    const double lx = log((double)X);
    const double lx2 = lx * lx;
    const double lx3 = lx2 * lx;
    const double a = 0.000681*lx3 - 0.011846*lx2 + 0.044074*lx + 0.988365;
    if(a < 1) return 1;
    if(a > X_6) return X_6;
    return a;
}
//
// 128 bit implementation
//
#define T i128
#define TADD(x,y) ((x)+(y))
#define TSUB(x,y) ((x)-(y))
#define TMUL(x,y) ((x)*(y))
#define TCONV_64(x) ((T)(x))
#define TCONV_128(x) (x)
#define TNEG(x) (-(x))
#define F_POW 0
#define NAME count_primes_128
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#define F_POW 1
#define NAME sum_primes_128
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#define F_POW 2
#define NAME sum_primes2_128
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#define F_POW 3
#define NAME sum_primes3_128
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#define F_POW 4
#define NAME sum_primes4_128
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#undef T
#undef TADD
#undef TSUB
#undef TMUL
#undef TCONV_64
#undef TCONV_128
#undef TNEG
//
// 256 bit implementation
//
#define T i256
#define TADD(x,y) i256_add(x,y)
#define TSUB(x,y) i256_sub(x,y)
#define TMUL(x,y) i256_mul(x,y)
#define TCONV_64(x) i256_from_i64(x)
#define TCONV_128(x) i256_from_i128(x)
#define TNEG(x) i256_neg(x)
#pragma omp declare reduction(+ : i256 : omp_out = TADD(omp_out,omp_in)) initializer(omp_priv=(i256){})
#define F_POW 0
#define NAME count_primes_256
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#define F_POW 1
#define NAME sum_primes_256
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#define F_POW 2
#define NAME sum_primes2_256
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#define F_POW 3
#define NAME sum_primes3_256
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#define F_POW 4
#define NAME sum_primes4_256
#include "sum_over_primes.inl"
#undef NAME
#undef F_POW
#undef T
#undef TADD
#undef TSUB
#undef TMUL
#undef TCONV_64
#undef TCONV_128
#undef TNEG
|
LISAutils.c | #include "LISAutils.h"
#include "omp.h"
/************ Global Parameters ************/
LISAParams* injectedparams = NULL;
LISAGlobalParams* globalparams = NULL;
LISAPrior* priorParams = NULL;
LISAAddParams* addparams = NULL;
double logZdata = 0.;
SimpleLikelihoodPrecomputedValues22* simplelikelihoodinjvals22 = NULL;
SimpleLikelihoodPrecomputedValuesHM* simplelikelihoodinjvalsHM = NULL;
/***************** Parsing string to choose what masses set to sample for *****************/
/* Function to convert string input SampleMassParams to tag */
/* Map the user-supplied string onto the mass-parameterization tag.
 * Exits with an error message on unrecognized input. */
SampleMassParamstag ParseSampleMassParamstag(char* string) {
  if(strcmp(string, "m1m2")==0) return m1m2;
  if(strcmp(string, "Mchirpeta")==0) return Mchirpeta;
  printf("Error in ParseSampleMassParamstag: string not recognized.\n");
  exit(1);
}
/* Superseded by sampleLframe */
// /* Function to convert string input SampleTimeParam to tag */
// SampleTimeParamtag ParseSampleTimeParamtag(char* string) {
// SampleTimeParamtag tag;
// if(strcmp(string, "tSSB")==0) tag = tSSB;
// else if(strcmp(string, "tL")==0) tag = tL;
// else {
// printf("Error in ParseSampleTimeParamtag: string not recognized.\n");
// exit(1);
// }
// return tag;
// }
/************ Functions to initialize and clean up structure for the signals ************/
/* Destroy the three TDI mode lists held by this signal, then the struct itself. */
void LISASignalCAmpPhase_Cleanup(LISASignalCAmpPhase* signal) {
  if(signal->TDI1Signal != NULL)
    ListmodesCAmpPhaseFrequencySeries_Destroy(signal->TDI1Signal);
  if(signal->TDI2Signal != NULL)
    ListmodesCAmpPhaseFrequencySeries_Destroy(signal->TDI2Signal);
  if(signal->TDI3Signal != NULL)
    ListmodesCAmpPhaseFrequencySeries_Destroy(signal->TDI3Signal);
  free(signal);
}
/* (Re)initialize *signal to an empty LISASignalCAmpPhase.
 * If *signal already exists, its contents and storage are destroyed first
 * and a fresh struct is allocated. Exits on NULL handle or OOM. */
void LISASignalCAmpPhase_Init(LISASignalCAmpPhase** signal) {
  if(!signal) exit(1);
  if(*signal) {
    /* BUG FIX: _Cleanup() free()s the struct itself; the previous code then
     * wrote the NULL fields through the freed pointer (use-after-free).
     * Destroy the old object and allocate a fresh one instead. */
    LISASignalCAmpPhase_Cleanup(*signal);
  }
  *signal = malloc(sizeof **signal);
  if(!*signal) exit(1); /* out of memory */
  (*signal)->TDI1Signal = NULL;
  (*signal)->TDI2Signal = NULL;
  (*signal)->TDI3Signal = NULL;
}
/* Destroy the three TDI spline sets held by this injection, then the struct itself. */
void LISAInjectionCAmpPhase_Cleanup(LISAInjectionCAmpPhase* signal) {
  if(signal->TDI1Splines != NULL)
    ListmodesCAmpPhaseSpline_Destroy(signal->TDI1Splines);
  if(signal->TDI2Splines != NULL)
    ListmodesCAmpPhaseSpline_Destroy(signal->TDI2Splines);
  if(signal->TDI3Splines != NULL)
    ListmodesCAmpPhaseSpline_Destroy(signal->TDI3Splines);
  free(signal);
}
/* (Re)initialize *signal to an empty LISAInjectionCAmpPhase.
 * If *signal already exists, its contents and storage are destroyed first
 * and a fresh struct is allocated. Exits on NULL handle or OOM. */
void LISAInjectionCAmpPhase_Init(LISAInjectionCAmpPhase** signal) {
  if(!signal) exit(1);
  if(*signal) {
    /* BUG FIX: _Cleanup() free()s the struct itself; the previous code then
     * wrote the NULL fields through the freed pointer (use-after-free).
     * Destroy the old object and allocate a fresh one instead. */
    LISAInjectionCAmpPhase_Cleanup(*signal);
  }
  *signal = malloc(sizeof **signal);
  if(!*signal) exit(1); /* out of memory */
  (*signal)->TDI1Splines = NULL;
  (*signal)->TDI2Splines = NULL;
  (*signal)->TDI3Splines = NULL;
}
/* Release the three TDI Re/Im frequency series, then the struct itself. */
void LISASignalReIm_Cleanup(LISASignalReIm* signal) {
  if(signal->TDI1Signal != NULL)
    ReImFrequencySeries_Cleanup(signal->TDI1Signal);
  if(signal->TDI2Signal != NULL)
    ReImFrequencySeries_Cleanup(signal->TDI2Signal);
  if(signal->TDI3Signal != NULL)
    ReImFrequencySeries_Cleanup(signal->TDI3Signal);
  free(signal);
}
/* (Re)initialize *signal to an empty LISASignalReIm.
 * If *signal already exists, its contents and storage are destroyed first
 * and a fresh struct is allocated. Exits on NULL handle or OOM. */
void LISASignalReIm_Init(LISASignalReIm** signal) {
  if(!signal) exit(1);
  if(*signal) {
    /* BUG FIX: _Cleanup() free()s the struct itself; the previous code then
     * wrote the NULL fields through the freed pointer (use-after-free).
     * Destroy the old object and allocate a fresh one instead. */
    LISASignalReIm_Cleanup(*signal);
  }
  *signal = malloc(sizeof **signal);
  if(!*signal) exit(1); /* out of memory */
  (*signal)->TDI1Signal = NULL;
  (*signal)->TDI2Signal = NULL;
  (*signal)->TDI3Signal = NULL;
}
/* Release the three TDI series, the frequency grid and the three noise
 * curves, then the struct itself. */
void LISAInjectionReIm_Cleanup(LISAInjectionReIm* signal) {
  if(signal->TDI1Signal != NULL) ReImFrequencySeries_Cleanup(signal->TDI1Signal);
  if(signal->TDI2Signal != NULL) ReImFrequencySeries_Cleanup(signal->TDI2Signal);
  if(signal->TDI3Signal != NULL) ReImFrequencySeries_Cleanup(signal->TDI3Signal);
  if(signal->freq != NULL) gsl_vector_free(signal->freq);
  if(signal->noisevalues1 != NULL) gsl_vector_free(signal->noisevalues1);
  if(signal->noisevalues2 != NULL) gsl_vector_free(signal->noisevalues2);
  if(signal->noisevalues3 != NULL) gsl_vector_free(signal->noisevalues3);
  free(signal);
}
/* (Re)initialize *signal to an empty LISAInjectionReIm.
 * If *signal already exists, its contents and storage are destroyed first
 * and a fresh struct is allocated. Exits on NULL handle or OOM. */
void LISAInjectionReIm_Init(LISAInjectionReIm** signal) {
  if(!signal) exit(1);
  if(*signal) {
    /* BUG FIX: _Cleanup() free()s the struct itself; the previous code then
     * wrote the NULL fields through the freed pointer (use-after-free).
     * Destroy the old object and allocate a fresh one instead. */
    LISAInjectionReIm_Cleanup(*signal);
  }
  *signal = malloc(sizeof **signal);
  if(!*signal) exit(1); /* out of memory */
  (*signal)->TDI1Signal = NULL;
  (*signal)->TDI2Signal = NULL;
  (*signal)->TDI3Signal = NULL;
  (*signal)->freq = NULL;
  (*signal)->noisevalues1 = NULL;
  (*signal)->noisevalues2 = NULL;
  (*signal)->noisevalues3 = NULL;
}
/************ Parsing arguments function ************/
/* This function must be called by Python scripts as soon as possible
to make sure global variables are set up with meaningful values */
void InitGlobalParams(void)
{
  /* Allocate and zero the global waveform/inner-product parameters. */
  globalparams = (LISAGlobalParams *)malloc(sizeof(LISAGlobalParams));
  if (globalparams == NULL) {
    printf("Error in InitGlobalParams: allocation of globalparams failed.\n");
    exit(1);
  }
  memset(globalparams, 0, sizeof(LISAGlobalParams));
  globalparams->fRef = 0.;
  globalparams->deltatobs = 2.;
  globalparams->minf = 0.;
  globalparams->maxf = 1.;
  globalparams->tagextpn = 1;
  globalparams->tagtRefatLISA = 0;
  globalparams->Mfmatch = 0.;
  /* Keep in sync with the parse_args_LISA default of 1 — this field was
   * previously left at 0 by the memset, inconsistently with the parser. */
  globalparams->setphiRefatfRef = 1;
  globalparams->nbmodeinj = 5;
  globalparams->nbmodetemp = 5;
  globalparams->tagint = 0;
  globalparams->tagtdi = TDIAETXYZ;
  globalparams->nbptsoverlap = 32768;
  globalparams->variant = &LISAProposal;
  globalparams->zerolikelihood = 0;
  globalparams->frozenLISA = 0;
  globalparams->responseapprox = full;
  globalparams->tagsimplelikelihood22 = 0;
  globalparams->tagsimplelikelihoodHM = 0;
  /* Allocate and zero the default injection parameters. */
  injectedparams = (LISAParams *)malloc(sizeof(LISAParams));
  if (injectedparams == NULL) {
    printf("Error in InitGlobalParams: allocation of injectedparams failed.\n");
    exit(1);
  }
  memset(injectedparams, 0, sizeof(LISAParams));
  injectedparams->tRef = 0.;
  injectedparams->phiRef = 0.;
  injectedparams->m1 = 2*1e6;
  injectedparams->m2 = 1*1e6;
  injectedparams->distance = 40*1e3;
  injectedparams->lambda = 0.;
  injectedparams->beta = 0.;
  injectedparams->inclination = PI/3.;
  injectedparams->polarization = 0.;
  injectedparams->nbmode = globalparams->nbmodeinj;
}
/* Parse command line to initialize LISAParams, LISAPrior, and LISARunParams objects */
/* Defaults are set first, then overridden by any recognized option; an
 * unrecognized option is a fatal error.
 * NOTE(review): options taking a value read argv[++i] without checking
 * i+1 < argc, so a trailing option with a missing value reads past the
 * argument list — callers must pass well-formed command lines. */
void parse_args_LISA(ssize_t argc, char **argv,
  LISAParams* params,
  LISAGlobalParams* globalparams,
  LISAPrior* prior,
  LISARunParams* run,
  LISAAddParams* addparams)
{
  /* Help text; defaults quoted here match the values assigned below. */
  char help[] = " \
LISAInference by Sylvain Marsat, John Baker, and Philip Graff\n Copyright July 2015\n \
\n\
This program performs rapid parameter estimation for LIGO and LISA CBC sources in the no-noise case.\n\
Arguments are as follows:\n\
\n\
--------------------------------------------------\n\
----- Injected Signal Parameters -----------------\n\
--------------------------------------------------\n\
 --tRef Time at reference frequency (sec, default=0)\n\
 --phiRef Orbital phase at reference frequency fRef (radians, default=0)\n\
 --m1 Component mass 1 in Solar masses (larger, default=2e6)\n\
 --m2 Component mass 2 in Solar masses (smaller, default=1e6)\n\
 --distance Distance to source in Mpc (default=40*1e3)\n\
 --lambda First angle for the position in the sky (radians, default=0)\n\
 --beta Second angle for the position in the sky (radians, default=0)\n\
 --inclination Inclination of source orbital plane to observer line of sight\n\
 (radians, default=PI/3)\n\
 --polarization Polarization of source (radians, default=0)\n\
 --nbmode Number of modes of radiation to generate (1-5, default=5)\n\
 --snr Use a target network SNR for the injection by rescaling distance\n\
\n\
-----------------------------------------------------------------\n\
----- Global Waveform/Inner products Parameters -----------------\n\
-----------------------------------------------------------------\n\
 --fRef Reference frequency where phiRef is set (Hz, default=0, interpreted as Mf=0.14)\n\
 --deltatobs Observation duration (years, default=2)\n\
 --minf Minimal frequency (Hz, default=0) - when set to 0, use the lowest frequency where the detector noise model is trusted __LISASimFD_Noise_fLow (set somewhat arbitrarily)\n\
 --maxf Maximal frequency (Hz, default=1Hz) - when set to 0, use the highest frequency where the detector noise model is trusted __LISASimFD_Noise_fHigh (set somewhat arbitrarily)\n\
 --tagextpn Tag to allow PN extension of the waveform at low frequencies (default=1)\n\
 --tagtRefatLISA Tag to allow t0 to specify signal offset time at LISA guiding center rather than at SSB (default=0)\n\
 --Mfmatch When PN extension allowed, geometric matching frequency: will use ROM above this value. If <=0, use ROM down to the lowest covered frequency (default=0.)\n\
 --setphiRefatfRef Flag for adjusting the FD phase at phiRef at the given fRef, which depends also on tRef - if false, treat phiRef simply as an orbital phase shift (minus an observer phase shift) (default=1)\n\
 --nbmodeinj Number of modes of radiation to use for the injection (1-5, default=5)\n\
 --nbmodetemp Number of modes of radiation to use for the templates (1-5, default=5)\n\
 --tagint Tag choosing the integrator: 0 for Fresnel (default), 1 for linear integration\n\
 --tagtdi Tag choosing the set of TDI variables to use (default TDIAETXYZ)\n\
 --nbptsoverlap Number of points to use for linear integration (default 32768)\n\
 --variant String representing the variant of LISA to be applied (default LISAProposal)\n\
 --zerolikelihood Zero out the likelihood to sample from the prior for testing purposes (default 0)\n\
 --frozenLISA Freeze the orbital configuration to the time of peak of the injection (default 0)\n\
 --responseapprox Approximation in the GAB and orb response - choices are full (full response, default), lowfL (keep orbital delay frequency-dependence but simplify constellation response) and lowf (simplify constellation and orbital response) - WARNING : at the moment noises are not consistent, and TDI combinations from the GAB are unchanged\n\
 --simplelikelihood22 Tag to use simplified, frozen-LISA and lowf likelihood where mode overlaps are precomputed - 22-mode only - can only be used when the masses and time (tL) are pinned to injection values (Note: when using --snr, distance adjustment done using responseapprox, not the simple response)\n\
 --simplelikelihoodHM Tag to use simplified, frozen-LISA and lowf likelihood where mode overlaps are precomputed - set of modes - can only be used when the masses and time (tL) are pinned to injection values (Note: when using --snr, distance adjustment done using responseapprox, not the simple response)\n\
\n\
--------------------------------------------------\n\
----- Prior Boundary Settings --------------------\n\
--------------------------------------------------\n\
 --samplemassparams Choose the set of mass params to sample from - options are m1m2 and Mchirpeta (default m1m2)\n\
 --sampletimeparam DEPRECATED - Choose the time param to sample from - options are tSSB and tL (default tSSB)\n\
 --sampleLframe flag to sample L-frame params tL, lambdaL, betaL, psiL instead of SSB-frame params -- priors are interpreted for those L-frame params -- no phase transformation -- SSB params are reported in output (default False)\n\
 --deltaT Half-width of time prior (sec, default=3600)\n\
 --comp-min Minimum component mass in Solar masses - when sampling m1m2 (default=1e4)\n\
 --comp-max Maximum component mass in Solar masses - when sampling m1m2 (default=1e8)\n\
 --mtot-min Minimum total mass in Solar masses - when sampling m1m2 (default=5e4)\n\
 --mtot-max Maximum total mass in Solar masses - when sampling m1m2 (default=1e8)\n\
 --q-max Maximum mass ratio, m1/m2 - when sampling m1m2 (default=11.98, minimum is 1)\n\
 --Mchirp-min Minimum chirp mass in Solar masses - when sampling Mchirpeta (default=2e4)\n\
 --Mchirp-max Maximum chirp mass in Solar masses - when sampling Mchirpeta (default=4e7)\n\
 --eta-min Minimum symmetric mass ratio eta - when sampling Mchirpeta (default=0.072)\n\
 --eta-max Maximum symmetric mass ratio eta - when sampling Mchirpeta (default=0.25)\n\
 --dist-min Minimum distance to source (Mpc, default=1e3)\n\
 --dist-max Maximum distance to source (Mpc, default=400*1e3)\n\
 --rescale-distprior In case a target SNR is given with --snr, rescale dist-min and dist-max accordingly\n\
 --logflat-massprior Uses uniform (natural) log M, rather than uniform M - applies to m1/m2 or Mchirp (default false)\n\
 --flat-distprior Uses uniform linear scaled distance, rather than ~ DL^2\n\
 Parameters lambda, beta, phase, pol, inc can also be given min and max values (for testing)\n\
 Syntax: --PARAM-min\n\
\n\
--------------------------------------------------\n\
----- Fix Parameters In Sampling -----------------\n\
--------------------------------------------------\n\
 --pin-PARAM Pin indicated parameter to injected value\n\
 --fix-PARAM Fix indicated parameter to specified value\n\
 Available parameter names are:\n\
 m1 Mass 1 (MSol) - used only when sampling masses as m1/m2\n\
 m2 Mass 2 (MSol) - used only when sampling masses as m1/m2\n\
 Mchirp Chirp mass (MSol) - used only when sampling masses as Mchirp/eta\n\
 eta Symmetric mass ratio - used only when sampling masses as Mchirp/eta\n\
 dist Distance (luminosity, Mpc)\n\
 time Reference time (GPS sec)\n\
 phase Reference orbital phase (rad)\n\
 lambda First angle for the position in the sky (rad)\n\
 beta Second angle for the position in the sky (rad)\n\
 inc Inclination of orbital plane to observer (rad)\n\
 pol Polarization (rad)\n\
 Note: --pin-PARAM overrides --fix-PARAM\n\
\n\
--------------------------------------------------\n\
----- BAMBI Sampler Settings ---------------------\n\
--------------------------------------------------\n\
 --eff Target efficiency of sampling (default=0.5)\n\
 --tol Tolerance for evidence calculation convergence (default=0.5)\n\
 --consteff Option to use constant efficiency mode\n\
 --nlive Number of live points for sampling (default=1000)\n\
 --bambi Use BAMBI's neural network logL learning (no option, default off)\n\
 --resume Resume from a previous run (no option, default off)\n\
 --maxiter Maximum number of iterations - if 0, use convergence criterion to stop (default 0)\n\
 --writeparams Write params - if 1, write run parameters to file (default 1)\n\
 --outroot Root for output files (default='chains/LISAinference_')\n\
 --netfile Neural network settings file if using --bambi (default='LISAinference.inp')\n\
 --mmodal Use multimodal decomposition (no option, default off)\n\
 --maxcls Max number of modes in multimodal decomposition (default 1)\n\
 --nclspar Number of parameters to use for multimodal decomposition - in the order of the cube (default 1)\n\
 --ztol In multimodal decomposition, modes with lnZ lower than ztol are ignored (default -1e90)\n\
 --seed Seed the inference by setting one of the live points to the injection (no option, default off)\n\
-----------------------------------------------------------------\n\
----- Additional Parameters -------------------------------------\n\
-----------------------------------------------------------------\n\
 --addparams To be followed by the value of parameters: m1 m2 tRef distance phiRef inclination lambda beta polarization. Used to compute a likelihood for these parameters in LISAlikelihood. Not used in LISAinference.\n\
 --loadparamsfile Option to load a list of template parameters from file and to output results to file (default false).\n\
 --nlinesparams Number of lines in input params file.\n\
 --indir Input directory when loading input parameters file from file for LISAlikelihood.\n\
 --infile Input file name when loading input parameters file from file for LISAlikelihood.\n\
 --outdir Directory for input/output file.\n\
 --outfile Input file with the parameters.\n\
\n";
  ssize_t i;
  /* set default values for the injection params */
  params->tRef = 0.;
  params->phiRef = 0.;
  params->m1 = 2*1e6;
  params->m2 = 1*1e6;
  params->distance = 40*1e3;
  params->lambda = 0.;
  params->beta = 0.;
  params->inclination = PI/3.;
  params->polarization = 0.;
  params->nbmode = 5;
  /* set default values for the global params */
  globalparams->fRef = 0.;
  globalparams->deltatobs = 2.;
  globalparams->minf = 0.;
  globalparams->maxf = 1.;
  globalparams->tagextpn = 1;
  globalparams->tagtRefatLISA = 0;
  globalparams->Mfmatch = 0.;
  globalparams->setphiRefatfRef = 1;
  globalparams->nbmodeinj = 5;
  globalparams->nbmodetemp = 5;
  globalparams->tagint = 0;
  globalparams->tagtdi = TDIAETXYZ;
  globalparams->nbptsoverlap = 32768;
  globalparams->variant = &LISAProposal;
  globalparams->zerolikelihood = 0;
  globalparams->frozenLISA = 0;
  globalparams->responseapprox = full;
  globalparams->tagsimplelikelihood22 = 0;
  globalparams->tagsimplelikelihoodHM = 0;
  /* set default values for the prior limits */
  prior->samplemassparams = m1m2;
  //prior->sampletimeparam = tSSB; /* DEPRECATED */
  prior->sampleLframe = 0;
  prior->deltaT = 3600.;
  prior->comp_min = 1e4;
  prior->comp_max = 1e8;
  prior->mtot_min = 5e4;
  prior->mtot_max = 1e8;
  prior->qmax = 11.98;
  prior->Mchirp_min = 2e4;
  prior->Mchirp_max = 4e7;
  prior->eta_min = 0.072;
  prior->eta_max = 0.25;
  prior->dist_min = 1e3;
  prior->dist_max = 400*1e3;
  prior->lambda_min = 0.;
  prior->lambda_max = 2*PI;
  prior->beta_min = -PI/2.;
  prior->beta_max = PI/2.;
  prior->phase_min = 0.;
  prior->phase_max = 2*PI;
  prior->pol_min = 0.;
  prior->pol_max = PI;
  prior->inc_min = 0.;
  prior->inc_max = PI;
  /* NAN marks "not fixed"; checked with isnan() downstream */
  prior->fix_m1 = NAN;
  prior->fix_m2 = NAN;
  prior->fix_Mchirp = NAN;
  prior->fix_eta = NAN;
  prior->fix_dist = NAN;
  prior->fix_time = NAN;
  prior->fix_phase = NAN;
  prior->fix_pol = NAN;
  prior->fix_lambda = NAN;
  prior->fix_beta = NAN;
  prior->fix_inc = NAN;
  prior->pin_m1 = 0;
  prior->pin_m2 = 0;
  prior->pin_Mchirp = 0;
  prior->pin_eta = 0;
  prior->pin_dist = 0;
  prior->pin_time = 0;
  prior->pin_phase = 0;
  prior->pin_pol = 0;
  prior->pin_lambda = 0;
  prior->pin_beta = 0;
  prior->pin_inc = 0;
  prior->snr_target = NAN;
  prior->rescale_distprior = 0;
  prior->flat_distprior = 0;
  prior->logflat_massprior = 0;
  /* set default values for the run settings */
  run->eff = 0.5;
  run->tol = 0.5;
  run->consteff = 0;
  run->nlive = 1000;
  run->writeparams = 1;
  strcpy(run->outroot, "chains/LISAinference_");
  run->bambi = 0;
  run->resume = 0;
  run->maxiter = 0;
  strcpy(run->netfile, "LISAinference.inp");
  run->mmodal = 0;
  run->maxcls = 1;
  run->nclspar = 1;
  run->ztol = -1e90;
  run->seed = 0;
  /* set default values for the additional params */
  addparams->tRef = 0.;
  addparams->phiRef = 0.;
  addparams->m1 = 2*1e6;
  addparams->m2 = 1*1e6;
  addparams->distance = 40*1e3;
  addparams->lambda = 0.;
  addparams->beta = 0.;
  addparams->inclination = PI/3.;
  addparams->polarization = 0.;
  /* Note: nbmode used is nbmodetemp from globalparams */
  addparams->loadparamsfile = 0;
  addparams->nlinesparams = 0; /* No default; has to be provided */
  strcpy(addparams->indir, ""); /* No default; has to be provided */
  strcpy(addparams->infile, ""); /* No default; has to be provided */
  strcpy(addparams->outdir, ""); /* No default; has to be provided */
  strcpy(addparams->outfile, ""); /* No default; has to be provided */
  /* Consume command line */
  for (i = 1; i < argc; ++i) {
    if (strcmp(argv[i], "--help") == 0) {
      fprintf(stdout,"%s", help);
      exit(0);
    } else if (strcmp(argv[i], "--tRef") == 0) {
      params->tRef = atof(argv[++i]);
    } else if (strcmp(argv[i], "--phiRef") == 0) {
      params->phiRef = atof(argv[++i]);
    } else if (strcmp(argv[i], "--m1") == 0) {
      params->m1 = atof(argv[++i]);
    } else if (strcmp(argv[i], "--m2") == 0) {
      params->m2 = atof(argv[++i]);
    } else if (strcmp(argv[i], "--distance") == 0) {
      params->distance = atof(argv[++i]);
    } else if (strcmp(argv[i], "--lambda") == 0) {
      params->lambda = atof(argv[++i]);
    } else if (strcmp(argv[i], "--beta") == 0) {
      params->beta = atof(argv[++i]);
    } else if (strcmp(argv[i], "--inclination") == 0) {
      params->inclination = atof(argv[++i]);
    } else if (strcmp(argv[i], "--polarization") == 0) {
      params->polarization = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fRef") == 0) {
      globalparams->fRef = atof(argv[++i]);
    } else if (strcmp(argv[i], "--deltatobs") == 0) {
      globalparams->deltatobs = atof(argv[++i]);
    } else if (strcmp(argv[i], "--minf") == 0) {
      globalparams->minf = atof(argv[++i]);
    } else if (strcmp(argv[i], "--maxf") == 0) {
      globalparams->maxf = atof(argv[++i]);
    } else if (strcmp(argv[i], "--tagextpn") == 0) {
      globalparams->tagextpn = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--tagtRefatLISA") == 0) {
      globalparams->tagtRefatLISA = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--Mfmatch") == 0) {
      globalparams->Mfmatch = atof(argv[++i]);
    } else if (strcmp(argv[i], "--setphiRefatfRef") == 0) {
      /* integer flag: parse with atoi (was atof, silently truncated) */
      globalparams->setphiRefatfRef = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--nbmodeinj") == 0) {
      globalparams->nbmodeinj = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--nbmodetemp") == 0) {
      globalparams->nbmodetemp = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--tagint") == 0) {
      globalparams->tagint = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--tagtdi") == 0) {
      globalparams->tagtdi = ParseTDItag(argv[++i]);
    } else if (strcmp(argv[i], "--nbptsoverlap") == 0) {
      globalparams->nbptsoverlap = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--zerolikelihood") == 0) {
      globalparams->zerolikelihood = 1;
    } else if (strcmp(argv[i], "--frozenLISA") == 0) {
      globalparams->frozenLISA = 1;
    } else if (strcmp(argv[i], "--responseapprox") == 0) {
      globalparams->responseapprox = ParseResponseApproxtag(argv[++i]);
    } else if (strcmp(argv[i], "--simplelikelihood22") == 0) {
      globalparams->tagsimplelikelihood22 = 1;
    } else if (strcmp(argv[i], "--simplelikelihoodHM") == 0) {
      globalparams->tagsimplelikelihoodHM = 1;
    } else if (strcmp(argv[i], "--samplemassparams") == 0) {
      prior->samplemassparams = ParseSampleMassParamstag(argv[++i]);
    } else if (strcmp(argv[i], "--sampletimeparam") == 0) {
      printf("WARNING: argument sampletimeparam is deprecated, superseded by sampleLframe -- ignored.\n");
      ++i; /* still consume the value so parsing stays aligned */
      //prior->sampletimeparam = ParseSampleTimeParamtag(argv[++i]);
    } else if (strcmp(argv[i], "--sampleLframe") == 0) {
      prior->sampleLframe = 1;
    } else if (strcmp(argv[i], "--deltaT") == 0) {
      prior->deltaT = atof(argv[++i]);
    } else if (strcmp(argv[i], "--comp-min") == 0) {
      prior->comp_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--comp-max") == 0) {
      prior->comp_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--mtot-min") == 0) {
      prior->mtot_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--mtot-max") == 0) {
      prior->mtot_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--q-max") == 0) {
      prior->qmax = atof(argv[++i]);
    } else if (strcmp(argv[i], "--Mchirp-min") == 0) {
      prior->Mchirp_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--Mchirp-max") == 0) {
      prior->Mchirp_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--eta-min") == 0) {
      prior->eta_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--eta-max") == 0) {
      prior->eta_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--dist-min") == 0) {
      prior->dist_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--dist-max") == 0) {
      prior->dist_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--lambda-min") == 0) {
      prior->lambda_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--lambda-max") == 0) {
      prior->lambda_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--beta-min") == 0) {
      prior->beta_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--beta-max") == 0) {
      prior->beta_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--phase-min") == 0) {
      prior->phase_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--phase-max") == 0) {
      prior->phase_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--pol-min") == 0) {
      prior->pol_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--pol-max") == 0) {
      prior->pol_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--inc-min") == 0) {
      prior->inc_min = atof(argv[++i]);
    } else if (strcmp(argv[i], "--inc-max") == 0) {
      prior->inc_max = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-m1") == 0) {
      prior->fix_m1 = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-m2") == 0) {
      prior->fix_m2 = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-Mchirp") == 0) {
      prior->fix_Mchirp = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-eta") == 0) {
      prior->fix_eta = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-dist") == 0) {
      prior->fix_dist = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-lambda") == 0) {
      prior->fix_lambda = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-beta") == 0) {
      prior->fix_beta = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-time") == 0) {
      prior->fix_time = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-phase") == 0) {
      prior->fix_phase = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-inc") == 0) {
      prior->fix_inc = atof(argv[++i]);
    } else if (strcmp(argv[i], "--fix-pol") == 0) {
      prior->fix_pol = atof(argv[++i]);
    } else if (strcmp(argv[i], "--pin-m1") == 0) {
      prior->pin_m1 = 1;
    } else if (strcmp(argv[i], "--pin-m2") == 0) {
      prior->pin_m2 = 1;
    } else if (strcmp(argv[i], "--pin-Mchirp") == 0) {
      prior->pin_Mchirp = 1;
    } else if (strcmp(argv[i], "--pin-eta") == 0) {
      prior->pin_eta = 1;
    } else if (strcmp(argv[i], "--pin-dist") == 0) {
      prior->pin_dist = 1;
    } else if (strcmp(argv[i], "--pin-lambda") == 0) {
      prior->pin_lambda = 1;
    } else if (strcmp(argv[i], "--pin-beta") == 0) {
      prior->pin_beta = 1;
    } else if (strcmp(argv[i], "--pin-time") == 0) {
      prior->pin_time = 1;
    } else if (strcmp(argv[i], "--pin-phase") == 0) {
      prior->pin_phase = 1;
    } else if (strcmp(argv[i], "--pin-inc") == 0) {
      prior->pin_inc = 1;
    } else if (strcmp(argv[i], "--pin-pol") == 0) {
      prior->pin_pol = 1;
    } else if (strcmp(argv[i], "--snr") == 0) {
      prior->snr_target = atof(argv[++i]);
    } else if (strcmp(argv[i], "--rescale-distprior") == 0) {
      prior->rescale_distprior = 1;
    } else if (strcmp(argv[i], "--flat-distprior") == 0) {
      prior->flat_distprior = 1;
    } else if (strcmp(argv[i], "--logflat-massprior") == 0) {
      prior->logflat_massprior = 1;
    } else if (strcmp(argv[i], "--eff") == 0) {
      run->eff = atof(argv[++i]);
    } else if (strcmp(argv[i], "--tol") == 0) {
      run->tol = atof(argv[++i]);
    } else if (strcmp(argv[i], "--consteff") == 0) {
      run->consteff = 1;
    } else if (strcmp(argv[i], "--nlive") == 0) {
      run->nlive = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--bambi") == 0) {
      run->bambi = 1;
    } else if (strcmp(argv[i], "--resume") == 0) {
      run->resume = 1;
    } else if (strcmp(argv[i], "--maxiter") == 0) {
      run->maxiter = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--writeparams") == 0) {
      run->writeparams = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--outroot") == 0) {
      strcpy(run->outroot, argv[++i]);
    } else if (strcmp(argv[i], "--netfile") == 0) {
      strcpy(run->netfile, argv[++i]);
    } else if (strcmp(argv[i], "--mmodal") == 0) {
      run->mmodal = 1;
    } else if (strcmp(argv[i], "--maxcls") == 0) {
      run->maxcls = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--nclspar") == 0) {
      run->nclspar = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--ztol") == 0) {
      run->ztol = atof(argv[++i]);
    } else if (strcmp(argv[i], "--seed") == 0) {
      run->seed = 1;
    } else if (strcmp(argv[i], "--variant") == 0) {
      i++;
      if (strcmp(argv[i], "LISAProposal") == 0) globalparams->variant = &LISAProposal;
      else if (strcmp(argv[i], "LISA2017") == 0) globalparams->variant = &LISA2017;
      else if (strcmp(argv[i], "LISA2010") == 0) globalparams->variant = &LISA2010;
      else if (strcmp(argv[i], "fastOrbitLISA") == 0) globalparams->variant = &fastOrbitLISA;
      else if (strcmp(argv[i], "slowOrbitLISA") == 0) globalparams->variant = &slowOrbitLISA;
      else if (strcmp(argv[i], "tinyOrbitLISA") == 0) globalparams->variant = &tinyOrbitLISA;
      else if (strcmp(argv[i], "bigOrbitLISA") == 0) globalparams->variant = &bigOrbitLISA;
      else {
        printf("Error: --variant option '%s' not recognized\n",argv[i]);
        exit(1);
      }
    } else if (strcmp(argv[i], "--addparams") == 0) {
      /* Must be followed by the values of m1 m2 tRef distance phiRef inclination lambda beta polarization */
      addparams->m1 = atof(argv[++i]);
      addparams->m2 = atof(argv[++i]);
      addparams->tRef = atof(argv[++i]);
      addparams->distance = atof(argv[++i]);
      addparams->phiRef = atof(argv[++i]);
      addparams->inclination = atof(argv[++i]);
      addparams->lambda = atof(argv[++i]);
      addparams->beta = atof(argv[++i]);
      addparams->polarization = atof(argv[++i]);
    } else if (strcmp(argv[i], "--loadparamsfile") == 0) {
      addparams->loadparamsfile = 1;
    } else if (strcmp(argv[i], "--nlinesparams") == 0) {
      addparams->nlinesparams = atoi(argv[++i]);
    } else if (strcmp(argv[i], "--indir") == 0) {
      strcpy(addparams->indir, argv[++i]);
    } else if (strcmp(argv[i], "--infile") == 0) {
      strcpy(addparams->infile, argv[++i]);
    } else if (strcmp(argv[i], "--outdir") == 0) {
      strcpy(addparams->outdir, argv[++i]);
    } else if (strcmp(argv[i], "--outfile") == 0) {
      strcpy(addparams->outfile, argv[++i]);
    } else {
      printf("Error: invalid option: %s\n", argv[i]);
      exit(1);
    }
  }
  /* Set frequency interval to default values */
  if(globalparams->minf==0.) globalparams->minf = __LISASimFD_Noise_fLow;
  if(globalparams->maxf==0.) globalparams->maxf = __LISASimFD_Noise_fHigh;
  /* Enforce eta_max <= 0.25 */
  prior->eta_max = fmin(0.25, prior->eta_max);
  /* If fixing one of the masses, adjust the prior range according to q>=1 */
  /* Avoids drawing past the boundary - but PriorBoundaryCheck would also reject all these draws */
  /* NOTE: these checks previously acted on the global priorParams instead of
   * the prior argument just populated above - fixed to use prior. */
  if(!isnan(prior->fix_m1)) prior->comp_max = fmax(prior->comp_max, prior->fix_m1);
  if(!isnan(prior->fix_m2)) prior->comp_min = fmax(prior->comp_min, prior->fix_m2);
  /* Simplified likelihood options 22 and HM are exclusive to avoid ambiguity */
  if(globalparams->tagsimplelikelihood22 && globalparams->tagsimplelikelihoodHM) {
    printf("Error in parse_args_LISA: using tags for both simplified likelihood 22 and HM - inconsistent.");
    exit(1);
  }
  /* If using the simplified likelihood, make sure that the masses and time are pinned to injection values - otherwise inconsistent */
  /* NOTE: slight inconsistency, */
  if(globalparams->tagsimplelikelihood22 || globalparams->tagsimplelikelihoodHM) {
    if((prior->pin_m1==0) || (prior->pin_m2==0) || (prior->pin_time==0)) {
      printf("Error in parse_args_LISA: using simplified likelihood while m1, m2 or tRef is not pinned to injection value - inconsistent.");
      exit(1);
    }
  }
  return;
}
/* Function printing injection/signal parameters to stdout */
void report_LISAParams(
  LISAParams* params)
{
  /* Report the source parameters as currently stored (i.e. before any
   * possible rescaling of the distance to match a target SNR). */
  const char* separator = "-----------------------------------------------\n";
  printf("%s", separator);
  printf("m1: %.16e\n", params->m1);
  printf("m2: %.16e\n", params->m2);
  printf("tRef: %.16e\n", params->tRef);
  printf("phiRef: %.16e\n", params->phiRef);
  printf("distance: %.16e\n", params->distance);
  printf("lambda: %.16e\n", params->lambda);
  printf("beta: %.16e\n", params->beta);
  printf("inclination: %.16e\n", params->inclination);
  printf("polarization: %.16e\n", params->polarization);
  printf("%s", separator);
  printf("\n");
}
/* Function printing all parameters of the run to an output file for future reference */
/* Writes <outroot>params.txt; returns SUCCESS, or a nonzero code when the
 * path cannot be allocated or the file cannot be opened (previously the
 * function kept writing to a NULL stream on fopen failure, and leaked path). */
int print_parameters_to_file_LISA(
  LISAParams* params,
  LISAGlobalParams* globalparams,
  LISAPrior* prior,
  LISARunParams* run)
{
  /* Output file */
  char *path=malloc(strlen(run->outroot)+64);
  if(path==NULL){printf("Failed to allocate output path.\n"); return 1;}
  sprintf(path,"%sparams.txt", run->outroot);
  FILE *f = fopen(path, "w");
  if(f==NULL){printf("Failed to open file:%s\n",path); free(path); return 1;}
  free(path); /* no longer needed once the file is open (was leaked) */
  /* Print injection parameters (before possible rescaling of the distances to match the target snr) */
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "Injection parameters:\n");
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "m1: %.16e\n", params->m1);
  fprintf(f, "m2: %.16e\n", params->m2);
  fprintf(f, "tRef: %.16e\n", params->tRef);
  fprintf(f, "phiRef: %.16e\n", params->phiRef);
  fprintf(f, "distance: %.16e\n", params->distance);
  fprintf(f, "lambda: %.16e\n", params->lambda);
  fprintf(f, "beta: %.16e\n", params->beta);
  fprintf(f, "inclination: %.16e\n", params->inclination);
  fprintf(f, "polarization: %.16e\n", params->polarization);
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "\n");
  /* Print global parameters */
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "Global parameters:\n");
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "fRef: %.16e\n", globalparams->fRef);
  fprintf(f, "deltatobs: %.16e\n", globalparams->deltatobs);
  fprintf(f, "minf: %.16e\n", globalparams->minf);
  fprintf(f, "maxf: %.16e\n", globalparams->maxf);
  fprintf(f, "tagextpn: %d\n", globalparams->tagextpn); /* int flag; was printed with %.16e (UB) */
  fprintf(f, "Mfmatch: %.16e\n", globalparams->Mfmatch);
  fprintf(f, "nbmodeinj: %d\n", globalparams->nbmodeinj);
  fprintf(f, "nbmodetemp: %d\n", globalparams->nbmodetemp);
  fprintf(f, "tagint: %d\n", globalparams->tagint);
  fprintf(f, "tagtdi: %d\n", globalparams->tagtdi); //Translation back from enum to string not implemented yet
  fprintf(f, "nbptsoverlap: %d\n", globalparams->nbptsoverlap);
  fprintf(f, "zerolikelihood: %d\n", globalparams->zerolikelihood);
  fprintf(f, "frozenLISA: %d\n", globalparams->frozenLISA);
  fprintf(f, "responseapprox: %d\n", globalparams->responseapprox);
  fprintf(f, "simplelikelihood22: %d\n", globalparams->tagsimplelikelihood22);
  fprintf(f, "simplelikelihoodHM: %d\n", globalparams->tagsimplelikelihoodHM);
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "\n");
  /* Print prior parameters */
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "Prior parameters:\n");
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "samplemassparams: %d\n", prior->samplemassparams); /* enum; was printed with %.16e (UB) */
  //fprintf(f, "sampletimeparam: %.16e\n", prior->sampletimeparam);
  fprintf(f, "sampleLframe: %d\n", prior->sampleLframe);
  fprintf(f, "deltaT: %.16e\n", prior->deltaT);
  fprintf(f, "comp_min: %.16e\n", prior->comp_min);
  fprintf(f, "comp_max: %.16e\n", prior->comp_max);
  fprintf(f, "mtot_min: %.16e\n", prior->mtot_min);
  fprintf(f, "mtot_max: %.16e\n", prior->mtot_max);
  fprintf(f, "qmax: %.16e\n", prior->qmax);
  fprintf(f, "Mchirp_min: %.16e\n", prior->Mchirp_min);
  fprintf(f, "Mchirp_max: %.16e\n", prior->Mchirp_max);
  fprintf(f, "eta_min: %.16e\n", prior->eta_min);
  fprintf(f, "eta_max: %.16e\n", prior->eta_max);
  fprintf(f, "dist_min: %.16e\n", prior->dist_min);
  fprintf(f, "dist_max: %.16e\n", prior->dist_max);
  fprintf(f, "lambda_min: %.16e\n", prior->lambda_min);
  fprintf(f, "lambda_max: %.16e\n", prior->lambda_max);
  fprintf(f, "beta_min: %.16e\n", prior->beta_min);
  fprintf(f, "beta_max: %.16e\n", prior->beta_max);
  fprintf(f, "phase_min: %.16e\n", prior->phase_min);
  fprintf(f, "phase_max: %.16e\n", prior->phase_max);
  fprintf(f, "pol_min: %.16e\n", prior->pol_min);
  fprintf(f, "pol_max: %.16e\n", prior->pol_max);
  fprintf(f, "inc_min: %.16e\n", prior->inc_min);
  fprintf(f, "inc_max: %.16e\n", prior->inc_max);
  fprintf(f, "fix_m1: %.16e\n", prior->fix_m1);
  fprintf(f, "fix_m2: %.16e\n", prior->fix_m2);
  fprintf(f, "fix_Mchirp: %.16e\n", prior->fix_Mchirp);
  fprintf(f, "fix_eta: %.16e\n", prior->fix_eta);
  fprintf(f, "fix_dist: %.16e\n", prior->fix_dist);
  fprintf(f, "fix_time: %.16e\n", prior->fix_time);
  fprintf(f, "fix_phase: %.16e\n", prior->fix_phase);
  fprintf(f, "fix_pol: %.16e\n", prior->fix_pol);
  fprintf(f, "fix_lambda: %.16e\n", prior->fix_lambda);
  fprintf(f, "fix_beta: %.16e\n", prior->fix_beta);
  fprintf(f, "fix_inc: %.16e\n", prior->fix_inc);
  fprintf(f, "pin_m1: %d\n", prior->pin_m1);
  fprintf(f, "pin_m2: %d\n", prior->pin_m2);
  fprintf(f, "pin_Mchirp: %d\n", prior->pin_Mchirp);
  fprintf(f, "pin_eta: %d\n", prior->pin_eta);
  fprintf(f, "pin_dist: %d\n", prior->pin_dist);
  fprintf(f, "pin_time: %d\n", prior->pin_time);
  fprintf(f, "pin_phase: %d\n", prior->pin_phase);
  fprintf(f, "pin_pol: %d\n", prior->pin_pol);
  fprintf(f, "pin_lambda: %d\n", prior->pin_lambda);
  fprintf(f, "pin_beta: %d\n", prior->pin_beta);
  fprintf(f, "pin_inc: %d\n", prior->pin_inc);
  fprintf(f, "snr_target: %.16e\n", prior->snr_target);
  fprintf(f, "rescale_distprior: %d\n", prior->rescale_distprior);
  /* hyphenated keys below kept as-is for compatibility with existing readers */
  fprintf(f, "flat-distprior: %d\n", prior->flat_distprior);
  fprintf(f, "logflat-massprior: %d\n", prior->logflat_massprior);
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "\n");
  /* Print run parameters */
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "Run parameters:\n");
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "eff: %g\n", run->eff);
  fprintf(f, "tol: %g\n", run->tol);
  fprintf(f, "consteff: %d\n", run->consteff);
  fprintf(f, "nlive: %d\n", run->nlive);
  fprintf(f, "bambi: %d\n", run->bambi);
  fprintf(f, "resume: %d\n", run->resume);
  fprintf(f, "maxiter: %d\n", run->maxiter);
  fprintf(f, "mmodal: %d\n", run->mmodal);
  fprintf(f, "maxcls: %d\n", run->maxcls);
  fprintf(f, "nclspar: %d\n", run->nclspar);
  fprintf(f, "ztol: %g\n", run->ztol);
  fprintf(f, "seed: %d\n", run->seed);
  fprintf(f, "-----------------------------------------------\n");
  /* Close output file */
  fclose(f);
  return SUCCESS;
}
/* Function printing distance parameters (used if they have been rescaled to a target snr) */
/* Appends to <outroot>params.txt; returns SUCCESS, or a nonzero code when the
 * path cannot be allocated or the file cannot be opened (previously the
 * function kept writing to a NULL stream on fopen failure, and leaked path). */
int print_rescaleddist_to_file_LISA(
  LISAParams* params,
  LISAGlobalParams* globalparams,
  LISAPrior* prior,
  LISARunParams* run)
{
  printf("Saving distance rescaling info to file.\n");
  /* Output file */
  char *path=malloc(strlen(run->outroot)+64);
  if(path==NULL){printf("Failed to allocate output path.\n"); return 1;}
  sprintf(path,"%sparams.txt", run->outroot);
  FILE *f = fopen(path, "a");
  if (f == NULL) {printf("Error. Failed to open file '%s'\n",path); free(path); return 1;}
  free(path); /* no longer needed once the file is open (was leaked) */
  /* Print rescaled distance and dist prior */
  fprintf(f, "\n");
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "Rescaled dist parameters:\n");
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "dist_resc: %.16e\n", params->distance);
  fprintf(f, "dist_min: %.16e\n", prior->dist_min);
  fprintf(f, "dist_max: %.16e\n", prior->dist_max);
  fprintf(f, "-----------------------------------------------\n");
  /* Close output file */
  fclose(f);
  return SUCCESS;
}
/* Function printing injection SNR and logZ (appended to the params file) */
/* NOTE: the original header comment here was a copy-paste of the distance one. */
/* Returns SUCCESS, or FAILURE if allocation or file opening fails. */
int print_snrlogZ_to_file_LISA(LISARunParams* run, double SNR, double logZ)
{
  printf("Saving injection SNR and logZ info to file.\n");
  /* Build the output path <outroot>params.txt and open it in append mode */
  char *path = malloc(strlen(run->outroot)+64);
  if (path == NULL) {
    printf("Error. Failed to allocate memory for the output path.\n");
    return FAILURE;
  }
  sprintf(path, "%sparams.txt", run->outroot);
  FILE *f = fopen(path, "a");
  if (f == NULL) {
    /* Bug fix: previously only warned and then called fprintf on a NULL stream (UB) */
    printf("Error. Failed to open file '%s'\n", path);
    free(path);
    return FAILURE;
  }
  /* Print SNR and logZ of the injection */
  fprintf(f, "\n");
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "SNR and logZ of the injection:\n");
  fprintf(f, "-----------------------------------------------\n");
  fprintf(f, "SNR: %.16e\n", SNR);
  fprintf(f, "logZ: %.16e\n", logZ);
  fprintf(f, "-----------------------------------------------\n");
  /* Close output file and release the path buffer (bug fix: path was leaked) */
  fclose(f);
  free(path);
  return SUCCESS;
}
/******** Trim modes that are out of range ********/
/* Truncates each mode's frequency series in the list at maximum_freq (hard-coded).
 * Keeps one sample past the cutoff (len_new = i+1) and never trims below 5 samples.
 * The trimmed series replaces the original in place; the original is freed here.
 * NOTE(review): maximum_freq=0.2 is presumably in Hz given the LISA band -- confirm. */
int listmodesCAmpPhaseTrim(ListmodesCAmpPhaseFrequencySeries* listSeries){
//return SUCCESS;
double maximum_freq=0.2;//For now we set this by hand. May also extent to cut low-freq end as well...
ListmodesCAmpPhaseFrequencySeries* listelem = listSeries;
int i;
/* Walk the linked list of modes */
while(listelem){
gsl_vector* freq = listelem->freqseries->freq;
gsl_vector* amp_real = listelem->freqseries->amp_real;
gsl_vector* amp_imag = listelem->freqseries->amp_imag;
gsl_vector* phase = listelem->freqseries->phase;
int len = (int) freq->size;
/* Find the first sample above the cutoff (i==len if none) */
for(i=0;i<len;i++)if(gsl_vector_get(freq,i)>maximum_freq)break;
/* Keep up to and including the first out-of-range sample */
int len_new=i+1;
if(len_new<5)len_new=5; //just to be on the safe side avoiding near zero-length case.
if(len_new<len){//Trim
/* Allocate a shorter series and copy the retained samples */
CAmpPhaseFrequencySeries *freqseries_new = 0;
CAmpPhaseFrequencySeries_Init( &freqseries_new,len_new);
for(i=0;i<len_new;i++) {
gsl_vector_set(freqseries_new->freq, i, gsl_vector_get(freq,i));
gsl_vector_set(freqseries_new->amp_real, i, gsl_vector_get(amp_real,i));
gsl_vector_set(freqseries_new->amp_imag, i, gsl_vector_get(amp_imag,i));
gsl_vector_set(freqseries_new->phase, i, gsl_vector_get(phase,i));
}
//printf("Trimming frequencies:\n %g<f[i<%i]<%g --> %g<f[i<%i]<%g\n",freq->data[0],len-1,freq->data[len-1],freqseries_new->freq->data[0],len_new-1,freqseries_new->freq->data[len_new-1]);
/* Free the old series before swapping in the trimmed one (ownership stays with the list node) */
CAmpPhaseFrequencySeries_Cleanup(listelem->freqseries);
listelem->freqseries=freqseries_new;
}
listelem=listelem->next;
}
return SUCCESS;
}
/************************* Functions to generate signals and compute likelihoods **************************/

/* Function generating a LISA signal as a list of modes in CAmp/Phase form, from LISA parameters.
 * Pipeline: EOBNRv2HM ROM waveform -> LISA TDI response (3 channels) -> spline
 * pre-interpolation -> precomputation of the inner product (h|h).
 * Returns SUCCESS, or FAILURE if the ROM generation failed (e.g. parameters out of bounds). */
int LISAGenerateSignalCAmpPhase(
  struct tagLISAParams* params,          /* Input: set of LISA parameters of the signal */
  struct tagLISASignalCAmpPhase* signal) /* Output: structure for the generated signal */
{
  int ret;
  ListmodesCAmpPhaseFrequencySeries* listROM = NULL;
  ListmodesCAmpPhaseFrequencySeries* listTDI1 = NULL;
  ListmodesCAmpPhaseFrequencySeries* listTDI2 = NULL;
  ListmodesCAmpPhaseFrequencySeries* listTDI3 = NULL;

  /* Checking that the global injectedparams has been set up */
  if (!injectedparams) {
    printf("Error: when calling LISAGenerateSignal, injectedparams points to NULL.\n");
    exit(1);
  }

  /* Starting frequency corresponding to duration of observation deltatobs */
  double fstartobs = 0.;
  if(!(globalparams->deltatobs==0.)) fstartobs = Newtonianfoft(params->m1, params->m2, globalparams->deltatobs);

  /* Generate the waveform with the ROM */
  /* NOTE: SimEOBNRv2HMROM accepts masses and distances in SI units, whereas LISA params is in solar masses and Mpc */
  /* NOTE: minf and deltatobs are taken into account if extension is allowed, but not maxf - restriction to the relevant frequency interval will occur in both the response processing and overlap computation */
  if(!(globalparams->tagextpn)) {
    /* Not extending the signal waveform */
    ret = SimEOBNRv2HMROM(&listROM, params->nbmode, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
  } else {
    /* Extending the waveform towards low frequencies - takes into account both fstartobs and minf */
    ret = SimEOBNRv2HMROMExtTF2(&listROM, params->nbmode, globalparams->Mfmatch, fmax(fstartobs, globalparams->minf), 0, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
  }
  /* If the ROM waveform generation failed (e.g. parameters were out of bounds) return FAILURE.
   * (Cleanup: a second, unreachable ret==FAILURE check and large blocks of
   * commented-out debug printing were removed.) */
  if(ret==FAILURE) return FAILURE;

  /* Process the waveform through the LISA response */
  //WARNING: tRef is ignored for now, i.e. set to 0
  LISASimFDResponseTDI3Chan(globalparams->tagtRefatLISA, globalparams->variant, &listROM, &listTDI1, &listTDI2, &listTDI3, params->tRef, params->lambda, params->beta, params->inclination, params->polarization, params->m1, params->m2, globalparams->maxf, globalparams->tagtdi, globalparams->frozenLISA, globalparams->responseapprox);

  /* Pre-interpolate the signal, building the spline matrices */
  ListmodesCAmpPhaseSpline* listsplinesgen1 = NULL;
  ListmodesCAmpPhaseSpline* listsplinesgen2 = NULL;
  ListmodesCAmpPhaseSpline* listsplinesgen3 = NULL;
  BuildListmodesCAmpPhaseSpline(&listsplinesgen1, listTDI1);
  BuildListmodesCAmpPhaseSpline(&listsplinesgen2, listTDI2);
  BuildListmodesCAmpPhaseSpline(&listsplinesgen3, listTDI3);

  /* Precompute the inner product (h|h) - takes into account the length of the observation with deltatobs */
  double fLow = fmax(__LISASimFD_Noise_fLow, globalparams->minf);
  double fHigh = fmin(__LISASimFD_Noise_fHigh, globalparams->maxf);
  ObjectFunction NoiseSn1 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 1);
  ObjectFunction NoiseSn2 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 2);
  ObjectFunction NoiseSn3 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 3);
  double TDI123hh = FDListmodesFresnelOverlap3Chan(listTDI1, listTDI2, listTDI3, listsplinesgen1, listsplinesgen2, listsplinesgen3, &NoiseSn1, &NoiseSn2, &NoiseSn3, fLow, fHigh, fstartobs, fstartobs);

  /* Output and clean up - the TDI series are handed over to the signal structure
   * (caller-owned); intermediate ROM and spline data are freed here */
  signal->TDI1Signal = listTDI1;
  signal->TDI2Signal = listTDI2;
  signal->TDI3Signal = listTDI3;
  signal->TDI123hh = TDI123hh;
  ListmodesCAmpPhaseFrequencySeries_Destroy(listROM);
  ListmodesCAmpPhaseSpline_Destroy(listsplinesgen1);
  ListmodesCAmpPhaseSpline_Destroy(listsplinesgen2);
  ListmodesCAmpPhaseSpline_Destroy(listsplinesgen3);
  return SUCCESS;
}
/* Function generating a LISA injection as a list of modes in CAmp/Phase form, from LISA parameters.
 * Same pipeline as LISAGenerateSignalCAmpPhase, but the output keeps the spline
 * representation of the TDI channels and the injection inner product (s|s). */
int LISAGenerateInjectionCAmpPhase(
struct tagLISAParams* params, /* Input: set of LISA parameters of the signal */
struct tagLISAInjectionCAmpPhase* signal) /* Output: structure for the injected signal */
{
int ret;
ListmodesCAmpPhaseFrequencySeries* listROM = NULL;
ListmodesCAmpPhaseFrequencySeries* listTDI1 = NULL;
ListmodesCAmpPhaseFrequencySeries* listTDI2 = NULL;
ListmodesCAmpPhaseFrequencySeries* listTDI3 = NULL;
/* Starting frequency corresponding to duration of observation deltatobs */
double fstartobs = 0.;
if(!(globalparams->deltatobs==0.)) fstartobs = Newtonianfoft(params->m1, params->m2, globalparams->deltatobs);
/* Generate the waveform with the ROM */
/* NOTE: SimEOBNRv2HMROM accepts masses and distances in SI units, whereas LISA params is in solar masses and Mpc */
/* NOTE: minf and deltatobs are taken into account if extension is allowed, but not maxf - restriction to the relevant frequency interval will occur in both the response prcessing and overlap computation */
/* If extending, taking into account both fstartobs and minf */
if(!(globalparams->tagextpn)) {
//printf("Not Extending signal waveform. Mfmatch=%g\n",globalparams->Mfmatch);
ret = SimEOBNRv2HMROM(&listROM, params->nbmode, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
} else {
//printf("Extending signal waveform. Mfmatch=%g\n",globalparams->Mfmatch);
ret = SimEOBNRv2HMROMExtTF2(&listROM, params->nbmode, globalparams->Mfmatch, fmax(fstartobs, globalparams->minf), 0, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
}
/* If the ROM waveform generation failed (e.g. parameters were out of bounds) return FAILURE */
if(ret==FAILURE){
printf("Failed to generate injection ROM\n");
return FAILURE;
}
//listmodesCAmpPhaseTrim(listROM);//Eliminate parts of the wf our of range
/* Process the waveform through the LISA response */
/* NOTE: here the injected tRef is used, unlike in LISAGenerateSignalCAmpPhase which passes params->tRef */
//WARNING: tRef is ignored for now, i.e. set to 0
LISASimFDResponseTDI3Chan(globalparams->tagtRefatLISA, globalparams->variant, &listROM, &listTDI1, &listTDI2, &listTDI3, injectedparams->tRef, params->lambda, params->beta, params->inclination, params->polarization, params->m1, params->m2, globalparams->maxf, globalparams->tagtdi, globalparams->frozenLISA, globalparams->responseapprox);
/* Pre-interpolate the injection, building the spline matrices */
ListmodesCAmpPhaseSpline* listsplinesinj1 = NULL;
ListmodesCAmpPhaseSpline* listsplinesinj2 = NULL;
ListmodesCAmpPhaseSpline* listsplinesinj3 = NULL;
BuildListmodesCAmpPhaseSpline(&listsplinesinj1, listTDI1);
BuildListmodesCAmpPhaseSpline(&listsplinesinj2, listTDI2);
BuildListmodesCAmpPhaseSpline(&listsplinesinj3, listTDI3);
/* Precompute the inner product (h|h) - takes into account the length of the observation with deltatobs */
double fLow = fmax(__LISASimFD_Noise_fLow, globalparams->minf);
double fHigh = fmin(__LISASimFD_Noise_fHigh, globalparams->maxf);
ObjectFunction NoiseSn1 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 1);
ObjectFunction NoiseSn2 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 2);
ObjectFunction NoiseSn3 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 3);
double TDI123ss = FDListmodesFresnelOverlap3Chan(listTDI1, listTDI2, listTDI3, listsplinesinj1, listsplinesinj2, listsplinesinj3, &NoiseSn1, &NoiseSn2, &NoiseSn3, fLow, fHigh, fstartobs, fstartobs);
/* Output and clean up - only the splines are kept in the injection structure;
 * the ROM and TDI frequency series are freed here */
signal->TDI1Splines = listsplinesinj1;
signal->TDI2Splines = listsplinesinj2;
signal->TDI3Splines = listsplinesinj3;
signal->TDI123ss = TDI123ss;
ListmodesCAmpPhaseFrequencySeries_Destroy(listROM);
ListmodesCAmpPhaseFrequencySeries_Destroy(listTDI1);
ListmodesCAmpPhaseFrequencySeries_Destroy(listTDI2);
ListmodesCAmpPhaseFrequencySeries_Destroy(listTDI3);
return SUCCESS;
}
/* Function generating a LISA signal as a frequency series in Re/Im form where the modes have been summed, from LISA parameters - takes as argument the frequencies on which to evaluate */
int LISAGenerateSignalReIm(
struct tagLISAParams* params, /* Input: set of LISA parameters of the template */
gsl_vector* freq, /* Input: frequencies on which evaluating the waveform (from the injection) */
struct tagLISASignalReIm* signal) /* Output: structure for the generated signal */
{
int ret;
ListmodesCAmpPhaseFrequencySeries* listROM = NULL;
ListmodesCAmpPhaseFrequencySeries* listTDI1 = NULL;
ListmodesCAmpPhaseFrequencySeries* listTDI2 = NULL;
ListmodesCAmpPhaseFrequencySeries* listTDI3 = NULL;
/* Checking that the global injectedparams has been set up */
if (!injectedparams) {
printf("Error: when calling LISAGenerateSignalReIm, injectedparams points to NULL.\n");
exit(1);
}
/* Starting frequency corresponding to duration of observation deltatobs */
double fstartobs = 0.;
if(!(globalparams->deltatobs==0.)) fstartobs = Newtonianfoft(params->m1, params->m2, globalparams->deltatobs);
/* Generate the waveform with the ROM */
/* NOTE: SimEOBNRv2HMROM accepts masses and distances in SI units, whereas LISA params is in solar masses and Mpc */
/* NOTE: minf and deltatobs are taken into account if extension is allowed, but not maxf - restriction to the relevant frequency interval will occur in both the response prcessing and overlap computation */
/* If extending, taking into account both fstartobs and minf */
if(!(globalparams->tagextpn)) {
//printf("Not Extending signal waveform. Mfmatch=%g\n",globalparams->Mfmatch);
ret = SimEOBNRv2HMROM(&listROM, params->nbmode, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
} else {
//printf("Extending signal waveform. Mfmatch=%g\n",globalparams->Mfmatch);
ret = SimEOBNRv2HMROMExtTF2(&listROM, params->nbmode, globalparams->Mfmatch, fmax(fstartobs, globalparams->minf), 0, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
}
/* If the ROM waveform generation failed (e.g. parameters were out of bounds) return FAILURE */
if(ret==FAILURE) return FAILURE;
//listmodesCAmpPhaseTrim(listROM);//Eliminate parts of the wf our of range
/* Process the waveform through the LISA response */
/* NOTE: here the injected tRef is passed to the response, not params->tRef */
//WARNING: tRef is ignored for now, i.e. set to 0
LISASimFDResponseTDI3Chan(globalparams->tagtRefatLISA, globalparams->variant, &listROM, &listTDI1, &listTDI2, &listTDI3, injectedparams->tRef, params->lambda, params->beta, params->inclination, params->polarization, params->m1, params->m2, globalparams->maxf, globalparams->tagtdi, globalparams->frozenLISA, globalparams->responseapprox);
/* Initialize structures for the ReIm frequency series, one point per input frequency */
int nbpts = (int) freq->size;
ReImFrequencySeries* TDI1 = NULL;
ReImFrequencySeries* TDI2 = NULL;
ReImFrequencySeries* TDI3 = NULL;
ReImFrequencySeries_Init(&TDI1, nbpts);
ReImFrequencySeries_Init(&TDI2, nbpts);
ReImFrequencySeries_Init(&TDI3, nbpts);
/* Compute the Re/Im frequency series - takes into account the length of the observation with deltatobs */
double fLow = fmax(__LISASimFD_Noise_fLow, globalparams->minf);
double fHigh = fmin(__LISASimFD_Noise_fHigh, globalparams->maxf);
/* Sum the modes of each TDI channel on the given frequency grid */
ReImFrequencySeries_SumListmodesCAmpPhaseFrequencySeries(TDI1, listTDI1, freq, fLow, fHigh, fstartobs);
ReImFrequencySeries_SumListmodesCAmpPhaseFrequencySeries(TDI2, listTDI2, freq, fLow, fHigh, fstartobs);
ReImFrequencySeries_SumListmodesCAmpPhaseFrequencySeries(TDI3, listTDI3, freq, fLow, fHigh, fstartobs);
/* Output and clean up - the Re/Im series are handed over to the signal structure */
signal->TDI1Signal = TDI1;
signal->TDI2Signal = TDI2;
signal->TDI3Signal = TDI3;
ListmodesCAmpPhaseFrequencySeries_Destroy(listROM);
ListmodesCAmpPhaseFrequencySeries_Destroy(listTDI1);
ListmodesCAmpPhaseFrequencySeries_Destroy(listTDI2);
ListmodesCAmpPhaseFrequencySeries_Destroy(listTDI3);
return SUCCESS;
}
/* Function generating a LISA injection signal as a frequency series in Re/Im form where the modes
 * have been summed, from LISA parameters - determines the frequencies itself.
 * Also evaluates the noise PSD of the three channels on the chosen frequency grid.
 * Returns SUCCESS, or FAILURE if the ROM generation failed. */
int LISAGenerateInjectionReIm(
  struct tagLISAParams* params,            /* Input: set of LISA parameters of the template */
  double fLow,                             /* Input: additional lower frequency limit (argument minf) */
  int nbpts,                               /* Input: number of frequency samples */
  int tagsampling,                         /* Input: tag for using linear (0) or logarithmic (1) sampling */
  struct tagLISAInjectionReIm* injection)  /* Output: structure for the generated signal */
{
  int ret;
  ListmodesCAmpPhaseFrequencySeries* listROM = NULL;
  ListmodesCAmpPhaseFrequencySeries* listTDI1 = NULL;
  ListmodesCAmpPhaseFrequencySeries* listTDI2 = NULL;
  ListmodesCAmpPhaseFrequencySeries* listTDI3 = NULL;

  /* Starting frequency corresponding to duration of observation deltatobs */
  double fstartobs = 0.;
  if(!(globalparams->deltatobs==0.)) fstartobs = Newtonianfoft(params->m1, params->m2, globalparams->deltatobs);

  /* Generate the waveform with the ROM */
  /* NOTE: SimEOBNRv2HMROM accepts masses and distances in SI units, whereas LISA params is in solar masses and Mpc */
  /* NOTE: minf and deltatobs are taken into account if extension is allowed, but not maxf - restriction to the relevant frequency interval will occur in both the response processing and overlap computation */
  if(!(globalparams->tagextpn)) {
    ret = SimEOBNRv2HMROM(&listROM, params->nbmode, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
  } else {
    /* If extending, taking into account both fstartobs and minf */
    ret = SimEOBNRv2HMROMExtTF2(&listROM, params->nbmode, globalparams->Mfmatch, fmax(fstartobs, globalparams->minf), 0, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
  }
  /* If the ROM waveform generation failed (e.g. parameters were out of bounds) return FAILURE */
  if(ret==FAILURE){
    /* Bug fix: used to call exit(1) here, which made the return unreachable and
     * aborted the whole process instead of letting the caller handle the error,
     * unlike the other generator functions in this file */
    printf("Error in LISAGenerateInjectionReIm: generation of the injection ROM waveform failed.\n");
    return FAILURE;
  }

  /* Process the waveform through the LISA response */
  //WARNING: tRef is ignored for now, i.e. set to 0
  LISASimFDResponseTDI3Chan(globalparams->tagtRefatLISA, globalparams->variant, &listROM, &listTDI1, &listTDI2, &listTDI3, params->tRef, params->lambda, params->beta, params->inclination, params->polarization, params->m1, params->m2, globalparams->maxf, globalparams->tagtdi, globalparams->frozenLISA, globalparams->responseapprox);

  /* Determine the frequency vector - uses the fact that the detector limiting frequencies are the same in all channels - takes into account the length of the observation with deltatobs */
  gsl_vector* freq = gsl_vector_alloc(nbpts);
  double fLowCut = fmax(fmax(__LISASimFD_Noise_fLow, fLow), fstartobs);
  double fHigh = fmin(__LISASimFD_Noise_fHigh, globalparams->maxf);
  ListmodesSetFrequencies(listROM, fLowCut, fHigh, nbpts, tagsampling, freq);

  /* Initialize structures for the ReIm frequency series */
  ReImFrequencySeries* TDI1 = NULL;
  ReImFrequencySeries* TDI2 = NULL;
  ReImFrequencySeries* TDI3 = NULL;
  ReImFrequencySeries_Init(&TDI1, nbpts);
  ReImFrequencySeries_Init(&TDI2, nbpts);
  ReImFrequencySeries_Init(&TDI3, nbpts);

  /* Compute the Re/Im frequency series by summing the modes on the chosen grid */
  ReImFrequencySeries_SumListmodesCAmpPhaseFrequencySeries(TDI1, listTDI1, freq, fLow, fHigh, fstartobs);
  ReImFrequencySeries_SumListmodesCAmpPhaseFrequencySeries(TDI2, listTDI2, freq, fLow, fHigh, fstartobs);
  ReImFrequencySeries_SumListmodesCAmpPhaseFrequencySeries(TDI3, listTDI3, freq, fLow, fHigh, fstartobs);

  /* Compute the noise values on the same frequency grid */
  ObjectFunction NoiseSn1 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 1);
  ObjectFunction NoiseSn2 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 2);
  ObjectFunction NoiseSn3 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 3);
  gsl_vector* noisevalues1 = gsl_vector_alloc(nbpts);
  gsl_vector* noisevalues2 = gsl_vector_alloc(nbpts);
  gsl_vector* noisevalues3 = gsl_vector_alloc(nbpts);
  EvaluateNoise(noisevalues1, freq, &NoiseSn1, __LISASimFD_Noise_fLow, __LISASimFD_Noise_fHigh);
  EvaluateNoise(noisevalues2, freq, &NoiseSn2, __LISASimFD_Noise_fLow, __LISASimFD_Noise_fHigh);
  EvaluateNoise(noisevalues3, freq, &NoiseSn3, __LISASimFD_Noise_fLow, __LISASimFD_Noise_fHigh);

  /* Output and clean up - series, frequency grid and noise vectors are handed over
   * to the injection structure; intermediate mode lists are freed */
  injection->TDI1Signal = TDI1;
  injection->TDI2Signal = TDI2;
  injection->TDI3Signal = TDI3;
  injection->freq = freq;
  injection->noisevalues1 = noisevalues1;
  injection->noisevalues2 = noisevalues2;
  injection->noisevalues3 = noisevalues3;
  ListmodesCAmpPhaseFrequencySeries_Destroy(listROM);
  ListmodesCAmpPhaseFrequencySeries_Destroy(listTDI1);
  ListmodesCAmpPhaseFrequencySeries_Destroy(listTDI2);
  ListmodesCAmpPhaseFrequencySeries_Destroy(listTDI3);
  return SUCCESS;
}
/* Log-Likelihood function */
// Routines for simplified likelihood 22 mode, frozen LISA, lowf
/* Reference phase in the L-frame convention, built from phiRef and the time shift tRef
 * evaluated at the frequency corresponding to Mf=0.14 (ROM 22-mode max -- confirm). */
static double funcphiL(LISAParams *params) {
  const double MfROMmax22 = 0.14;
  double Mtot_sec = (params->m1 + params->m2) * MTSUN_SI;
  double fRef = MfROMmax22 / Mtot_sec;
  return PI * params->tRef * fRef - params->phiRef;
}
/* Old L-frame convention */
/* lambdaL_old = lambdaL_paper - pi/2 */
static double funclambdaL_old(LISAParams *params) {
  double cb = cos(params->beta), sb = sin(params->beta);
  double cl = cos(params->lambda), sl = sin(params->lambda);
  /* Rotation by the pi/3 tilt of the constellation plane; note the swapped
   * atan2 arguments and overall sign compared to the new convention */
  return -atan2(cb*cl*cos(PI/3) + sb*sin(PI/3), cb*sl);
}
/* New L-frame convention for the paper */
/* lambdaL_old = lambdaL_paper - pi/2 */
static double funclambdaL(LISAParams *params) {
  double cb = cos(params->beta), sb = sin(params->beta);
  double cl = cos(params->lambda), sl = sin(params->lambda);
  /* Ecliptic longitude rotated by the pi/3 tilt of the constellation plane */
  return atan2(cb*sl, cb*cl*cos(PI/3) + sb*sin(PI/3));
}
/* Ecliptic latitude seen in the L-frame (rotation by the pi/3 tilt of the constellation plane) */
static double funcbetaL(LISAParams *params) {
  double cb = cos(params->beta), sb = sin(params->beta);
  double cl = cos(params->lambda);
  return -asin(cb*cl*sin(PI/3) - sb*cos(PI/3));
}
/* The old and new functions for psiL are in fact equivalent - the new one is simpler */
static double funcpsiL_old(LISAParams *params) {
  double cl = cos(params->lambda), sl = sin(params->lambda);
  double cb = cos(params->beta),   sb = sin(params->beta);
  double cp = cos(params->polarization), sp = sin(params->polarization);
  double c3 = cos(PI/3), s3 = sin(PI/3);
  /* Polarization angle rotated to the L-frame, written as atan2(num, den) */
  double num = c3*cb*sp - s3*(sl*cp - cl*sb*sp);
  double den = c3*cb*cp + s3*(sl*sp + cl*sb*cp);
  return atan2(num, den);
}
/* The old and new functions for psiL are in fact equivalent - the new one is simpler */
/* Here return polarization modulo pi */
static double funcpsiL(LISAParams *params) {
  /* Shift of the polarization angle induced by the frame rotation */
  double shift = atan2(-sin(PI/3)*sin(params->lambda),
                       cos(PI/3)*cos(params->beta) + sin(PI/3)*cos(params->lambda)*sin(params->beta));
  return modpi(params->polarization + shift);
}
/* Functions computing sa, se for 22 mode only */
/* NOTE: old convention, differs from new convention by absence of factor 3 absorbed in the inner products <lm|l'm'> */
static double complex funcsa(double d, double phi, double inc, double lambd, double beta, double psi) {
  double complex Daplus  = I*3./4 * (3 - cos(2*beta)) * cos(2*lambd - PI/3);
  double complex Dacross = I*3*sin(beta) * sin(2*lambd - PI/3);
  /* Amplitude factor common to the 22 and 2,-2 contributions */
  double amp = 1./d * 1./2 * sqrt(5/PI);
  double complex a22  = amp * pow(cos(inc/2), 4) * cexp(-2.*I*(phi+psi)) * (Daplus + I*Dacross) * 1./2;
  double complex a2m2 = amp * pow(sin(inc/2), 4) * cexp(-2.*I*(phi-psi)) * (Daplus - I*Dacross) * 1./2;
  return a22 + a2m2;
}
/* Companion of funcsa for the e channel (22 mode only, old convention) */
static double complex funcse(double d, double phi, double inc, double lambd, double beta, double psi) {
  double complex Deplus  = -I*3./4 * (3 - cos(2*beta)) * sin(2*lambd - PI/3);
  double complex Decross = I*3*sin(beta) * cos(2*lambd - PI/3);
  /* Amplitude factor common to the 22 and 2,-2 contributions */
  double amp = 1./d * 1./2 * sqrt(5/PI);
  double complex e22  = amp * pow(cos(inc/2), 4) * cexp(-2.*I*(phi+psi)) * (Deplus + I*Decross) * 1./2;
  double complex e2m2 = amp * pow(sin(inc/2), 4) * cexp(-2.*I*(phi-psi)) * (Deplus - I*Decross) * 1./2;
  return e22 + e2m2;
}
/* Functions computing sa_lm, se_lm for generic modes */
/* NOTE: new convention, factor 3 absorbed in the inner products <lm|l'm'> */
static double complex funcsa_lm(int l, int m, double d, double phi, double inc, double lambd, double beta, double psi) {
  double complex Faplus  = 1./2 * (1 + sin(beta)*sin(beta)) * cos(2*lambd - PI/3);
  double complex Facross = sin(beta) * sin(2*lambd - PI/3);
  /* Spin-weighted spherical harmonics for the +m and -m contributions */
  double complex sYlm = SpinWeightedSphericalHarmonic(inc, phi, -2, l, m);
  double complex sYl_minusm_star = conj(SpinWeightedSphericalHarmonic(inc, phi, -2, l, -m));
  double parity = pow(-1., l);  /* (-1)^l factor relating the -m mode */
  double complex term_m      = 1./d * 1./2 * sYlm * cexp(-2.*I*psi) * (Faplus + I*Facross);
  double complex term_minusm = 1./d * 1./2 * parity * sYl_minusm_star * cexp(2.*I*psi) * (Faplus - I*Facross);
  return term_m + term_minusm;
}
/* Companion of funcsa_lm for the e channel (generic modes, new convention) */
static double complex funcse_lm(int l, int m, double d, double phi, double inc, double lambd, double beta, double psi) {
  double complex Feplus  = 1./2 * (1 + sin(beta)*sin(beta)) * cos(2*lambd + PI/6);
  double complex Fecross = sin(beta) * sin(2*lambd + PI/6);
  /* Spin-weighted spherical harmonics for the +m and -m contributions */
  double complex sYlm = SpinWeightedSphericalHarmonic(inc, phi, -2, l, m);
  double complex sYl_minusm_star = conj(SpinWeightedSphericalHarmonic(inc, phi, -2, l, -m));
  double parity = pow(-1., l);  /* (-1)^l factor relating the -m mode */
  double complex term_m      = 1./d * 1./2 * sYlm * cexp(-2.*I*psi) * (Feplus + I*Fecross);
  double complex term_minusm = 1./d * 1./2 * parity * sYl_minusm_star * cexp(2.*I*psi) * (Feplus - I*Fecross);
  return term_m + term_minusm;
}
double CalculateLogLSimpleLikelihood22(SimpleLikelihoodPrecomputedValues22* simplelikelihoodvals22, LISAParams* params)
{
  /* Simplified likelihood for the 22 mode, frozen LISA, low-frequency approximation.
   * Only applicable when masses and time are pinned to the injection values;
   * the normalization and the injection sa, se must have been precomputed. */
  double complex sa_inj = simplelikelihoodvals22->sa;
  double complex se_inj = simplelikelihoodvals22->se;
  /* Map the template parameters to the L-frame (old conventions) */
  double phiL   = funcphiL(params);
  double lambdL = funclambdaL_old(params);
  double betaL  = funcbetaL(params);
  double psiL   = funcpsiL_old(params);
  /* Distance enters only through the ratio to the injected distance */
  double dratio = params->distance / injectedparams->distance;
  double complex sa_temp = funcsa(dratio, phiL, params->inclination, lambdL, betaL, psiL);
  double complex se_temp = funcse(dratio, phiL, params->inclination, lambdL, betaL, psiL);
  /* logL = -1/2 * norm * (|sa - sa_inj|^2 + |se - se_inj|^2) */
  double residual = pow(cabs(sa_temp - sa_inj), 2) + pow(cabs(se_temp - se_inj), 2);
  return -1./2 * simplelikelihoodvals22->normalization * residual;
}
/* Simplified likelihood for a set of higher modes, frozen LISA, lowf approximation.
 * Only applicable when masses and time are pinned to the injection values; the
 * weighted overlaps Lambda_lm_lpmp and the injection sa_lm, se_lm must have been
 * precomputed. Returns the (real) log-likelihood. */
double CalculateLogLSimpleLikelihoodHM(SimpleLikelihoodPrecomputedValuesHM* simplelikelihoodvalsHM, LISAParams* params)
{
/* Modes in the injection and the modes in the template have to be the same because of the precomputed overlaps */
if ( !((params->nbmode)==(injectedparams->nbmode)) ) {
printf("Error in CalculateLogLSimpleLikelihoodHM: nbmode in injection and template must be the same.\n");
exit(1);
}
double simplelogL = 0.;
double complex sa_lm_val = 0., se_lm_val = 0., sa_lpmp_val = 0., se_lpmp_val = 0.;
double complex sa_lm_inj_val = 0., se_lm_inj_val = 0., sa_lpmp_inj_val = 0., se_lpmp_inj_val = 0.;
double Lambda_lm_lpmp = 0.;
/* Simple likelihood for a set of modes, frozen LISA, lowf */
/* Only applicable for masses and time pinned to injection values */
/* Weighted inner products Lambda_lm_lpmp and sa_lm, se_lm must have been precomputed for the injection */
gsl_matrix* matrix_Lambda_lm_lpmp = simplelikelihoodvalsHM->Lambda_lm_lpmp;
gsl_vector* sa_lm_inj_real = simplelikelihoodvalsHM->sa_lm_real;
gsl_vector* sa_lm_inj_imag = simplelikelihoodvalsHM->sa_lm_imag;
gsl_vector* se_lm_inj_real = simplelikelihoodvalsHM->se_lm_real;
gsl_vector* se_lm_inj_imag = simplelikelihoodvalsHM->se_lm_imag;
/* Compute set of sa_lm and se_lm for template */
double phiL = funcphiL(params);
double lambdL = funclambdaL(params);
double betaL = funcbetaL(params);
double psiL = funcpsiL(params);
double inc = params->inclination;
double d = params->distance / injectedparams->distance;
double complex sa_lm[injectedparams->nbmode];
double complex se_lm[injectedparams->nbmode];
for(int i=0; i<injectedparams->nbmode; i++) {
int l = listmode[i][0];
int m = listmode[i][1];
sa_lm_val = funcsa_lm(l, m, d, phiL, inc, lambdL, betaL, psiL);
se_lm_val = funcse_lm(l, m, d, phiL, inc, lambdL, betaL, psiL);
sa_lm[i] = sa_lm_val;
se_lm[i] = se_lm_val;
}
/* Loop on the modes in the injection and the modes in the template (have to be the same because of the precomputed overlaps) */
for(int i=0; i<injectedparams->nbmode; i++) {
for(int j=0; j<injectedparams->nbmode; j++) {
int l = listmode[i][0];
int m = listmode[i][1];
int lp = listmode[j][0];
int mp = listmode[j][1];
/* sa_lm and se_lm for the template */
sa_lm_val = sa_lm[i];
se_lm_val = se_lm[i];
sa_lpmp_val = sa_lm[j];
se_lpmp_val = se_lm[j];
/* sa_lm and se_lm for the injection (comment fixed: was mislabelled "template") */
sa_lm_inj_val = gsl_vector_get(sa_lm_inj_real, i) + I*gsl_vector_get(sa_lm_inj_imag, i);
se_lm_inj_val = gsl_vector_get(se_lm_inj_real, i) + I*gsl_vector_get(se_lm_inj_imag, i);
sa_lpmp_inj_val = gsl_vector_get(sa_lm_inj_real, j) + I*gsl_vector_get(sa_lm_inj_imag, j);
se_lpmp_inj_val = gsl_vector_get(se_lm_inj_real, j) + I*gsl_vector_get(se_lm_inj_imag, j);
Lambda_lm_lpmp = gsl_matrix_get(matrix_Lambda_lm_lpmp, i, j);
/* NOTE: simplelogL is real, but that is guaranteed by the symmetry of the sum over modes lm and lpmp -- individual terms are complex */
simplelogL += -1./2 * Lambda_lm_lpmp * creal((sa_lm_val - sa_lm_inj_val) * conj(sa_lpmp_val - sa_lpmp_inj_val) + (se_lm_val - se_lm_inj_val) * conj(se_lpmp_val - se_lpmp_inj_val));
}
}
return simplelogL;
}
/* Log-likelihood of template `params` against a CAmpPhase injection.
 * The three TDI channels are assumed to have independent noise.
 * Returns -DBL_MAX (silent -Infinity) if waveform generation fails,
 * e.g. for out-of-bounds parameters. */
double CalculateLogLCAmpPhase(LISAParams *params, LISAInjectionCAmpPhase* injection)
{
double logL = -DBL_MAX;
int ret;
/* Generating the signal in the three detectors for the input parameters */
LISASignalCAmpPhase* generatedsignal = NULL;
LISASignalCAmpPhase_Init(&generatedsignal);
//TESTING
//clock_t tbeg, tend;
//tbeg = clock();
ret = LISAGenerateSignalCAmpPhase(params, generatedsignal);
//tend = clock();
//printf("time GenerateSignal: %g\n", (double) (tend-tbeg)/CLOCKS_PER_SEC);
//
//
//printf("in CalculateLogLCAmpPhase: tRef= %g\n", params->tRef);
/* If LISAGenerateSignal failed (e.g. parameters out of bound), silently return -Infinity logL */
if(ret==FAILURE) {
logL = -DBL_MAX;
}
else if(ret==SUCCESS) {
/* Computing the likelihood for each TDI channel - fstartobs is the max between the fstartobs of the injected and generated signals */
double fstartobsinjected = Newtonianfoft(injectedparams->m1, injectedparams->m2, globalparams->deltatobs);
double fstartobsgenerated = Newtonianfoft(params->m1, params->m2, globalparams->deltatobs);
/* Integration band: intersection of the noise-model band and the requested [minf, maxf] */
double fLow = fmax(__LISASimFD_Noise_fLow, globalparams->minf);
double fHigh = fmin(__LISASimFD_Noise_fHigh, globalparams->maxf);
ObjectFunction NoiseSn1 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 1);
ObjectFunction NoiseSn2 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 2);
ObjectFunction NoiseSn3 = NoiseFunction(globalparams->variant,globalparams->tagtdi, 3);
//TESTING
//tbeg = clock();
//
//printf("fLow, fHigh, fstartobsinjected, fstartobsgenerated = %g, %g, %g, %g\n", fLow, fHigh, fstartobsinjected, fstartobsgenerated);
double overlapTDI123 = FDListmodesFresnelOverlap3Chan(generatedsignal->TDI1Signal, generatedsignal->TDI2Signal, generatedsignal->TDI3Signal, injection->TDI1Splines, injection->TDI2Splines, injection->TDI3Splines, &NoiseSn1, &NoiseSn2, &NoiseSn3, fLow, fHigh, fstartobsinjected, fstartobsgenerated);
//tend = clock();
//printf("time Overlaps: %g\n", (double) (tend-tbeg)/CLOCKS_PER_SEC);
//
/* Output: value of the loglikelihood for the combined signals, assuming noise independence */
/* logL = (h|s) - 1/2 (s|s) - 1/2 (h|h) */
logL = overlapTDI123 - 1./2*(injection->TDI123ss) - 1./2*(generatedsignal->TDI123hh);
/* Diagnostic: a proper log-likelihood of this form should not be positive; report if it is */
if(logL>0){
printf("logL=%g\n",logL);
printf("overlapTDI123=%g, injection->TDI123ss=%g, generatedsignal->TDI123hh=%g\n", overlapTDI123, injection->TDI123ss, generatedsignal->TDI123hh);
report_LISAParams(params);
}
}
/* Clean up */
LISASignalCAmpPhase_Cleanup(generatedsignal);
return logL;
}
/* Log-likelihood of template `params` against a ReIm (frequency-sampled) injection.
 * The channel log-likelihoods add, assuming independent noise per channel.
 * Returns -DBL_MAX (silent -Infinity) when waveform generation fails,
 * e.g. for out-of-bounds parameters. */
double CalculateLogLReIm(LISAParams *params, LISAInjectionReIm* injection)
{
  double logL = -DBL_MAX;

  /* Frequency vector - assumes common to A,E,T, i.e. identical fLow, fHigh in all channels */
  gsl_vector* freq = injection->freq;

  /* Generate the template signal on the injection's frequency grid */
  LISASignalReIm* signal = NULL;
  LISASignalReIm_Init(&signal);
  int ret = LISAGenerateSignalReIm(params, freq, signal);

  if(ret==SUCCESS) {
    /* Per-channel likelihoods - fstartobs has already been taken into account */
    double logL1 = FDLogLikelihoodReIm(injection->TDI1Signal, signal->TDI1Signal, injection->noisevalues1);
    double logL2 = FDLogLikelihoodReIm(injection->TDI2Signal, signal->TDI2Signal, injection->noisevalues2);
    double logL3 = FDLogLikelihoodReIm(injection->TDI3Signal, signal->TDI3Signal, injection->noisevalues3);
    /* Combined log-likelihood, assuming noise independence across channels */
    logL = logL1 + logL2 + logL3;
  }
  /* On FAILURE, logL keeps its -DBL_MAX initialization (silent -Infinity) */

  /* Clean up */
  LISASignalReIm_Cleanup(signal);
  return logL;
}
/* Overlap-like quantity between the signals generated from params1 and params2,
 * evaluated on the injection's common frequency grid, summed over the three TDI
 * channels (assumed noise-independent).
 * Returns -DBL_MAX if either waveform generation fails. */
double CalculateOverlapReIm(LISAParams params1, LISAParams params2, LISAInjectionReIm * injection)
{
  double overlap = -DBL_MAX;
  int ret;
  /* Frequency vector - assumes common to A,E,T, i.e. identical fLow, fHigh in all channels */
  gsl_vector* freq = injection->freq;
  /* Generating the signal in the three detectors for the input parameters */
  LISASignalReIm* signal1 = NULL;
  LISASignalReIm* signal2 = NULL;
  LISASignalReIm_Init(&signal1);
  LISASignalReIm_Init(&signal2);
  /* FIX: restore the address-of operators that had been corrupted into the
   * pilcrow character by HTML-entity mangling of "&params1"/"&params2" */
  ret = LISAGenerateSignalReIm(&params1, freq, signal1);
  if(ret==SUCCESS){
    ret = LISAGenerateSignalReIm(&params2, freq, signal2);
  }
  /* If LISAGenerateSignal failed (e.g. parameters out of bound), silently return -Infinity logL */
  if(ret==FAILURE) {
    overlap = -DBL_MAX;
  }
  else if(ret==SUCCESS) {
    /* Computing the likelihood for each TDI channel - fstartobs has already been taken into account */
    double loglikelihoodTDI1 = FDLogLikelihoodReIm(signal1->TDI1Signal, signal2->TDI1Signal, injection->noisevalues1);
    double loglikelihoodTDI2 = FDLogLikelihoodReIm(signal1->TDI2Signal, signal2->TDI2Signal, injection->noisevalues2);
    double loglikelihoodTDI3 = FDLogLikelihoodReIm(signal1->TDI3Signal, signal2->TDI3Signal, injection->noisevalues3);
    overlap = loglikelihoodTDI1 + loglikelihoodTDI2 + loglikelihoodTDI3;
  }
  /* Clean up */
  LISASignalReIm_Cleanup(signal1);
  LISASignalReIm_Cleanup(signal2);
  return overlap;
}
/* Overlap between the signals generated from params1 (plain signal) and params2
 * (treated as an "injection", i.e. precomputed splines) - the CAmpPhase overlap
 * machinery is asymmetric between the two.
 * The `injection` argument supplies the nominal frequency grid for the optional
 * resampling step, which re-spaces the signal1 grid (the grid that defines the
 * overlap sampling), approximately rescaling its density by overlap_grid_rescale.
 * Returns -DBL_MAX if either waveform generation fails. */
double CalculateOverlapCAmpPhase(LISAParams params1, LISAParams params2, LISAInjectionCAmpPhase * injection)
{
  double overlap = -DBL_MAX;
  int ret;
  /* Resampling controls: grid_frac splits the grid into a dense lower part and
   * the remainder; grid_rescale_top selects which part gets the extra points */
  bool resampling=true;
  double overlap_grid_rescale=128.0,grid_frac=0.98;
  bool grid_rescale_top=true;
  /* Generating the signal in the three detectors for the input parameters */
  LISASignalCAmpPhase* signal1 = NULL;
  LISASignalCAmpPhase_Init(&signal1);
  //Note that the code for CAmpPhase overlaps is asymmetric, with one signal called the injection in the form of precomputed splines...
  LISAInjectionCAmpPhase* signal2 = NULL;
  LISAInjectionCAmpPhase_Init(&signal2);
  /* FIX: restore the address-of operators that had been corrupted into the
   * pilcrow character by HTML-entity mangling of "&params1"/"&params2" */
  ret = LISAGenerateSignalCAmpPhase(&params1, signal1);
  if(ret==SUCCESS){
    ret = LISAGenerateInjectionCAmpPhase(&params2, signal2);
  }
  /* FIX: only resample when generation succeeded - the original entered this
   * block even after FAILURE, operating on an unpopulated signal1 */
  if(resampling && ret==SUCCESS){
    //For each mode we resample the signal1 grid to align with the nominal "injection" freq domain
    //optionally rescaling the frequency grid (approximately) by factor overlap_grid_rescale.
    //Note that the overlap uses signal1 to define the grid, so this realizes a change in the overlap sampling
    //First we prepare splines to use later for interpolation
    ListmodesCAmpPhaseSpline* listsplinesgen1 = NULL;
    ListmodesCAmpPhaseSpline* listsplinesgen2 = NULL;
    ListmodesCAmpPhaseSpline* listsplinesgen3 = NULL;
    BuildListmodesCAmpPhaseSpline(&listsplinesgen1, signal1->TDI1Signal);
    BuildListmodesCAmpPhaseSpline(&listsplinesgen2, signal1->TDI2Signal);
    BuildListmodesCAmpPhaseSpline(&listsplinesgen3, signal1->TDI3Signal);
    //loop over modes
    ListmodesCAmpPhaseFrequencySeries* mode = signal1->TDI1Signal;
    int nsize=-1;//Seems this must be the same number for all modes//set first time through loop.
    while(mode) {
      ListmodesCAmpPhaseSpline* centermode=ListmodesCAmpPhaseSpline_GetMode(injection->TDI1Splines,mode->l,mode->m);
      gsl_vector_view ofreq_vv=gsl_matrix_column(centermode->splines->quadspline_phase,0);
      gsl_vector* ofreq = &ofreq_vv.vector;
      double s1f0=gsl_vector_get(mode->freqseries->freq,0);
      double s1fend=gsl_vector_get(mode->freqseries->freq,mode->freqseries->freq->size-1);
      double f0=gsl_vector_get(ofreq,0);
      int osize=ofreq->size,i0=0;
      double fend=gsl_vector_get(ofreq,osize-1);
      if(f0<s1f0){//s1 does not extend down as far as nominal freq range; trim range
        f0=s1f0;
        while(gsl_vector_get(ofreq,i0)<f0 && i0<osize-1)i0++;//select old-grid index to immediate left of f0
      }
      if(fend>s1fend){//s1 does not extend up as far as nominal freq range; trim range
        fend=s1fend;
        while(gsl_vector_get(ofreq,osize-1)>fend && i0<osize-1)osize--;//select old-grid index to immediate right of fend
      }
      if(nsize<0){
        if(grid_rescale_top)
          nsize=(overlap_grid_rescale*(1.0-grid_frac)+grid_frac)*(osize-i0);//Only set the first time (expected to be 22)
        else
          nsize=(1+(overlap_grid_rescale-1)*grid_frac)*(osize-i0);//Only set the first time (expected to be 22)
      }
      gsl_vector* nfreq = gsl_vector_alloc(nsize);
      double f=f0;
      gsl_vector_set(nfreq,0,f);//first point
      for(int i=1;i<nsize-1;i++){
        while(gsl_vector_get(ofreq,i0)<f && i0<osize-2)i0++;//select old-grid index to left of f
        double odelta=gsl_vector_get(ofreq,i0+1)-gsl_vector_get(ofreq,i0);
        double ofremain=gsl_vector_get(ofreq,osize-1)-gsl_vector_get(ofreq,i0);
        double nfremain=fend-f;
        double ndelta=odelta*nfremain/ofremain*(double)(osize-i0)/(double)(nsize-i);//rescale df by ratio of old/new mean df of remaining domain.
        if(i0+1<(int)(osize*grid_frac)){
          int effosize=osize*grid_frac,effnsize;
          if(grid_rescale_top)effnsize=nsize/(overlap_grid_rescale*(1.0/grid_frac-1.0)+1.0);
          else effnsize=nsize*grid_frac*overlap_grid_rescale/((overlap_grid_rescale-1.0)*grid_frac+1.0);
          if(effnsize<1)effnsize=1; //just in case
          ofremain=gsl_vector_get(ofreq,effosize-1)-gsl_vector_get(ofreq,i0);
          nfremain=gsl_vector_get(ofreq,effosize-1)-f;
          ndelta=odelta*nfremain/ofremain*(double)(effosize-i0)/(double)(effnsize-i);//rescale df by ratio of old/new mean df of remaining domain.
        }
        f+=ndelta;
        gsl_vector_set(nfreq,i,f);
      }
      gsl_vector_set(nfreq,nsize-1,fend);//last point
      //Also need to work with TDI2 and TDI3
      ListmodesCAmpPhaseFrequencySeries* mode2 = ListmodesCAmpPhaseFrequencySeries_GetMode(signal1->TDI2Signal,mode->l,mode->m);
      ListmodesCAmpPhaseFrequencySeries* mode3 = ListmodesCAmpPhaseFrequencySeries_GetMode(signal1->TDI3Signal,mode->l,mode->m);
      CAmpPhaseSpline * splines1 = ListmodesCAmpPhaseSpline_GetMode(listsplinesgen1,mode->l,mode->m)->splines;
      CAmpPhaseSpline * splines2 = ListmodesCAmpPhaseSpline_GetMode(listsplinesgen2,mode->l,mode->m)->splines;
      CAmpPhaseSpline * splines3 = ListmodesCAmpPhaseSpline_GetMode(listsplinesgen3,mode->l,mode->m)->splines;
      //resize allocated memory
      //NOTE(review): freqseries are re-Init'ed without an explicit Cleanup
      //(the Cleanup calls were commented out upstream) - possible leak unless
      //Init frees the previous contents; confirm against the Init implementation
      CAmpPhaseFrequencySeries_Init(&mode->freqseries,nsize);
      CAmpPhaseFrequencySeries_Init(&mode2->freqseries,nsize);
      CAmpPhaseFrequencySeries_Init(&mode3->freqseries,nsize);
      //fill new values
      gsl_vector_memcpy(mode->freqseries->freq,nfreq);
      gsl_vector_memcpy(mode2->freqseries->freq,nfreq);
      gsl_vector_memcpy(mode3->freqseries->freq,nfreq);
      EvalCAmpPhaseSpline(splines1,mode->freqseries);
      EvalCAmpPhaseSpline(splines2,mode2->freqseries);
      EvalCAmpPhaseSpline(splines3,mode3->freqseries);
      //clean up
      gsl_vector_free(nfreq);
      mode=mode->next;
    }//end loop over modes
    //clean up
    ListmodesCAmpPhaseSpline_Destroy(listsplinesgen1);
    ListmodesCAmpPhaseSpline_Destroy(listsplinesgen2);
    ListmodesCAmpPhaseSpline_Destroy(listsplinesgen3);
  }//end resampling
  /* If LISAGenerateSignal failed (e.g. parameters out of bound), silently return -Infinity logL */
  if(ret==FAILURE) {
    overlap = -DBL_MAX;
  }
  else if(ret==SUCCESS) {
    /* Computing the likelihood for each TDI channel - fstartobs is the max between the fstartobs of the injected and generated signals */
    double fstartobs1 = Newtonianfoft(params1.m1, params1.m2, globalparams->deltatobs);
    double fstartobs2 = Newtonianfoft(params2.m1, params2.m2, globalparams->deltatobs);
    double fLow = fmax(__LISASimFD_Noise_fLow, globalparams->minf);
    double fHigh = fmin(__LISASimFD_Noise_fHigh, globalparams->maxf);
    ObjectFunction NoiseSn1 = NoiseFunction(globalparams->variant, globalparams->tagtdi, 1);
    ObjectFunction NoiseSn2 = NoiseFunction(globalparams->variant, globalparams->tagtdi, 2);
    ObjectFunction NoiseSn3 = NoiseFunction(globalparams->variant, globalparams->tagtdi, 3);
    overlap = FDListmodesFresnelOverlap3Chan(signal1->TDI1Signal, signal1->TDI2Signal, signal1->TDI3Signal, signal2->TDI1Splines, signal2->TDI2Splines, signal2->TDI3Splines, &NoiseSn1, &NoiseSn2, &NoiseSn3, fLow, fHigh, fstartobs2, fstartobs1);
  }
  /* Clean up */
  LISASignalCAmpPhase_Cleanup(signal1);
  LISAInjectionCAmpPhase_Cleanup(signal2);
  return overlap;
}
/****************** Functions precomputing relevant values when using simplified likelihood *****************/
/* 22-mode only, in old convention */
/* NOTE: difference in convention, new convention has a factor 3 in the integrand defining <lm|l'm'> */
/* Precompute quantities for the simplified (22-mode-only, old-convention) likelihood:
 * - normalization: the inner product (h22|h22) of the frequency-weighted 22 mode
 * - sa, se: complex response factors for the two low-frequency channels
 * Exits the process on a NULL output pointer; returns FAILURE if the ROM
 * waveform generation fails, SUCCESS otherwise. */
int LISAComputeSimpleLikelihoodPrecomputedValues22(SimpleLikelihoodPrecomputedValues22* simplelikelihoodvals22, LISAParams* params)
{
/* Check pointer for output */
if(simplelikelihoodvals22==NULL) {
printf("Error in LISAComputeSimpleLikelihoodPrecomputedValues: called with NULL pointer for SimpleLikelihoodPrecomputedValues.\n");
exit(1);
}
int ret;
double normalization = 0.;
double complex sa = 0.;
double complex se = 0.;
/* Convert parameters to L-frame */
/* NOTE: uses the _old convention variants of lambdaL/psiL (22-mode version only) */
double phiL = funcphiL(params);
double lambdL = funclambdaL_old(params);
double betaL = funcbetaL(params);
double psiL = funcpsiL_old(params);
double inc = params->inclination;
double d = params->distance / injectedparams->distance; /* Should be 1., since params should be injectedparams */
/* Precompute sa, se */
sa = funcsa(d, phiL, inc, lambdL, betaL, psiL);
se = funcse(d, phiL, inc, lambdL, betaL, psiL);
/* Generate 22 mode for the fixed mass and time parameters - same fstartobs and PN extension as in the GenerateInjectionCAmpPhase function */
ListmodesCAmpPhaseFrequencySeries* listROM = NULL;
/* Starting frequency corresponding to duration of observation deltatobs */
double fstartobs = 0.;
if(!(globalparams->deltatobs==0.)) fstartobs = Newtonianfoft(params->m1, params->m2, globalparams->deltatobs);
/* Generate the waveform with the ROM */
/* NOTE: SimEOBNRv2HMROM accepts masses and distances in SI units, whereas LISA params is in solar masses and Mpc */
/* NOTE: minf and deltatobs are taken into account if extension is allowed, but not maxf - restriction to the relevant frequency interval will occur in both the response prcessing and overlap computation */
/* If extending, taking into account both fstartobs and minf */
if(!(globalparams->tagextpn)) {
//printf("Not Extending signal waveform. Mfmatch=%g\n",globalparams->Mfmatch);
ret = SimEOBNRv2HMROM(&listROM, params->nbmode, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
} else {
//printf("Extending signal waveform. Mfmatch=%g\n",globalparams->Mfmatch);
ret = SimEOBNRv2HMROMExtTF2(&listROM, params->nbmode, globalparams->Mfmatch, fmax(fstartobs, globalparams->minf), 0, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
}
/* If the ROM waveform generation failed (e.g. parameters were out of bounds) return FAILURE */
if(ret==FAILURE){
printf("Failed to generate injection ROM\n");
return FAILURE;
}
/* Multiply the h22 amplitude by pi f L/c */
/* NOTE: here acts ONLY on the 22 mode - should be generalized if one extends this function to higher modes */
double L = globalparams->variant->ConstL;
CAmpPhaseFrequencySeries* h22 = ListmodesCAmpPhaseFrequencySeries_GetMode(listROM, 2, 2)->freqseries;
gsl_vector* vfreq = h22->freq;
double* freq = vfreq->data;
double* areal = h22->amp_real->data;
double* aimag = h22->amp_imag->data;
/* In-place frequency weighting of the complex amplitude */
for(int i=0; i<vfreq->size; i++) {
areal[i] *= PI*L/C_SI*freq[i];
aimag[i] *= PI*L/C_SI*freq[i];
}
/* Precompute overlap of 22 mode with itself */
/* Build spline interpolation */
ListmodesCAmpPhaseSpline* listsplines = NULL;
BuildListmodesCAmpPhaseSpline(&listsplines, listROM);
/* Precompute the inner product (h|h) - takes into account the length of the observation with deltatobs */
double fLow = fmax(__LISASimFD_Noise_fLow, globalparams->minf);
double fHigh = fmin(__LISASimFD_Noise_fHigh, globalparams->maxf);
ObjectFunction NoiseSn = NoiseFunction(globalparams->variant, globalparams->tagtdi, 1); /* We use the first noise function - will be A and E, in this approximation at low-f we simply ignore the T channel - NOTE: we could add some checking that the tagtdi selector as well as LISA variant make sense */
/* Compute overlap itself */
CAmpPhaseSpline* splineh22 = ListmodesCAmpPhaseSpline_GetMode(listsplines, 2, 2)->splines;
normalization = FDSinglemodeFresnelOverlap(h22, splineh22, &NoiseSn, fLow, fHigh);
/* Output */
simplelikelihoodvals22->normalization = normalization;
simplelikelihoodvals22->sa = sa;
simplelikelihoodvals22->se = se;
/* NOTE Cleanup */
ListmodesCAmpPhaseFrequencySeries_Destroy(listROM);
ListmodesCAmpPhaseSpline_Destroy(listsplines);
return(SUCCESS);
}
/* Generalization to set of modes, in new convention */
/* NOTE: difference in convention, new convention has a factor 3 in the integrand defining <lm|l'm'> */
/* <lm|l'm'> = \int df/Sn (3L/2 * 2\pi f)^2 h_{lm} h_{l'm'}^{*} */
/* Precompute quantities for the simplified likelihood with higher modes (new convention):
 * fills the mode-overlap matrix Lambda_lm_lpmp = <lm|l'm'> (with the hlm weighted
 * by 3L/2 * 2*pi*f / c) and the per-mode response vectors sa_lm, se_lm (split into
 * real/imag gsl_vectors).
 * Exits the process on a NULL output pointer; returns FAILURE if the ROM
 * waveform generation fails, SUCCESS otherwise.
 * NOTE(review): the gsl_matrix/gsl_vector members are allocated here on every
 * call - repeated calls on the same struct would leak unless the caller frees
 * the previous contents first; confirm intended usage. */
int LISAComputeSimpleLikelihoodPrecomputedValuesHM(SimpleLikelihoodPrecomputedValuesHM* simplelikelihoodvalsHM, LISAParams* params)
{
/* Check pointer for output */
if(simplelikelihoodvalsHM==NULL) {
printf("Error in LISAComputeSimpleLikelihoodPrecomputedValues: called with NULL pointer for SimpleLikelihoodPrecomputedValues.\n");
exit(1);
}
int ret;
double Lambda_lm_lpmp = 0;
double complex sa_lm = 0.;
double complex se_lm = 0.;
/* Allocate output */
simplelikelihoodvalsHM->Lambda_lm_lpmp = gsl_matrix_alloc(params->nbmode, params->nbmode);
simplelikelihoodvalsHM->sa_lm_real = gsl_vector_alloc(params->nbmode);
simplelikelihoodvalsHM->sa_lm_imag = gsl_vector_alloc(params->nbmode);
simplelikelihoodvalsHM->se_lm_real = gsl_vector_alloc(params->nbmode);
simplelikelihoodvalsHM->se_lm_imag = gsl_vector_alloc(params->nbmode);
/* Convert parameters to L-frame */
double phiL = funcphiL(params);
double lambdL = funclambdaL(params);
double betaL = funcbetaL(params);
double psiL = funcpsiL(params);
double inc = params->inclination;
double d = params->distance / injectedparams->distance; /* Should be 1., since params should be injectedparams */
/* Precompute the set of sa_lm, se_lm */
for(int k=0; k<params->nbmode; k++) {
int l = listmode[k][0];
int m = listmode[k][1];
sa_lm = funcsa_lm(l, m, d, phiL, inc, lambdL, betaL, psiL);
se_lm = funcse_lm(l, m, d, phiL, inc, lambdL, betaL, psiL);
gsl_vector_set(simplelikelihoodvalsHM->sa_lm_real, k, creal(sa_lm));
gsl_vector_set(simplelikelihoodvalsHM->sa_lm_imag, k, cimag(sa_lm));
gsl_vector_set(simplelikelihoodvalsHM->se_lm_real, k, creal(se_lm));
gsl_vector_set(simplelikelihoodvalsHM->se_lm_imag, k, cimag(se_lm));
}
/* Generate 22 mode for the fixed mass and time parameters - same fstartobs and PN extension as in the GenerateInjectionCAmpPhase function */
ListmodesCAmpPhaseFrequencySeries* listROM = NULL;
/* Starting frequency corresponding to duration of observation deltatobs */
double fstartobs = 0.;
if(!(globalparams->deltatobs==0.)) fstartobs = Newtonianfoft(params->m1, params->m2, globalparams->deltatobs);
/* Generate the waveform with the ROM */
/* NOTE: SimEOBNRv2HMROM accepts masses and distances in SI units, whereas LISA params is in solar masses and Mpc */
/* NOTE: minf and deltatobs are taken into account if extension is allowed, but not maxf - restriction to the relevant frequency interval will occur in both the response prcessing and overlap computation */
/* If extending, taking into account both fstartobs and minf */
if(!(globalparams->tagextpn)) {
//printf("Not Extending signal waveform. Mfmatch=%g\n",globalparams->Mfmatch);
ret = SimEOBNRv2HMROM(&listROM, params->nbmode, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
} else {
//printf("Extending signal waveform. Mfmatch=%g\n",globalparams->Mfmatch);
ret = SimEOBNRv2HMROMExtTF2(&listROM, params->nbmode, globalparams->Mfmatch, fmax(fstartobs, globalparams->minf), 0, params->tRef - injectedparams->tRef, params->phiRef, globalparams->fRef, (params->m1)*MSUN_SI, (params->m2)*MSUN_SI, (params->distance)*1e6*PC_SI, globalparams->setphiRefatfRef);
}
/* If the ROM waveform generation failed (e.g. parameters were out of bounds) return FAILURE */
if(ret==FAILURE){
printf("Failed to generate injection ROM\n");
return FAILURE;
}
/* Multiply the hlm amplitudes by 3L/2 * 2pi f / c */
double L = globalparams->variant->ConstL;
for(int k=0; k<params->nbmode; k++) {
int l = listmode[k][0];
int m = listmode[k][1];
CAmpPhaseFrequencySeries* hlm = ListmodesCAmpPhaseFrequencySeries_GetMode(listROM, l, m)->freqseries;
gsl_vector* vfreq = hlm->freq;
double* freq = vfreq->data;
double* areal = hlm->amp_real->data;
double* aimag = hlm->amp_imag->data;
/* In-place frequency weighting of the complex amplitude: 3*pi*L*f/c = 3L/2 * 2*pi*f / c */
for(int i=0; i<vfreq->size; i++) {
areal[i] *= 3*PI*L/C_SI*freq[i];
aimag[i] *= 3*PI*L/C_SI*freq[i];
}
}
/* Precompute overlap of frequency-weighted hlm modes with themselves */
/* Build spline interpolation */
ListmodesCAmpPhaseSpline* listsplines = NULL;
BuildListmodesCAmpPhaseSpline(&listsplines, listROM);
/* Precompute the inner product (h|h) - takes into account the length of the observation with deltatobs */
double fLow = fmax(__LISASimFD_Noise_fLow, globalparams->minf);
double fHigh = fmin(__LISASimFD_Noise_fHigh, globalparams->maxf);
ObjectFunction NoiseSn = NoiseFunction(globalparams->variant, globalparams->tagtdi, 1); /* We use the first noise function - will be A and E, in this approximation at low-f we simply ignore the T channel - NOTE: we could add some checking that the tagtdi selector as well as LISA variant make sense */
/* Compute overlap itself */
/* Full (nbmode x nbmode) matrix of cross-mode overlaps <lm|l'm'> */
for(int i=0; i<params->nbmode; i++) {
for(int j=0; j<params->nbmode; j++) {
int l = listmode[i][0];
int m = listmode[i][1];
int lp = listmode[j][0];
int mp = listmode[j][1];
CAmpPhaseSpline* splinehlm = ListmodesCAmpPhaseSpline_GetMode(listsplines, l, m)->splines;
CAmpPhaseFrequencySeries* hlpmp = ListmodesCAmpPhaseFrequencySeries_GetMode(listROM, lp, mp)->freqseries;
Lambda_lm_lpmp = FDSinglemodeFresnelOverlap(hlpmp, splinehlm, &NoiseSn, fLow, fHigh);
gsl_matrix_set(simplelikelihoodvalsHM->Lambda_lm_lpmp, i, j, Lambda_lm_lpmp);
}
}
/* Precompute sa_lm, se_lm */
/* NOTE Cleanup */
ListmodesCAmpPhaseFrequencySeries_Destroy(listROM);
ListmodesCAmpPhaseSpline_Destroy(listsplines);
return(SUCCESS);
}
/***************************** Functions handling the prior ******************************/
/* Functions to check that returned parameter values fit in prior boundaries */
/* For the mass parameters (first two cube parameters) m1/m2 - uses only comp, mtot, q constraints */
/* Check mass parameters (Cube[0]=m1, Cube[1]=m2) against the prior bounds.
 * Returns 1 if the point is outside the prior, 0 if it is acceptable.
 * Conventions: requires m1 >= m2; enforces component-mass, total-mass and
 * mass-ratio (qmax) limits - qmax is the validity limit of EOBNRv2HMROM. */
int PriorBoundaryCheckm1m2(LISAPrior *prior, double *Cube)
{
  /* Enforce the m1 >= m2 ordering convention */
  if (Cube[0] < Cube[1])
    return 1;
  /* Component-mass bounds */
  if (Cube[0] < prior->comp_min || Cube[0] > prior->comp_max || Cube[1] < prior->comp_min || Cube[1] > prior->comp_max)
    return 1;
  /* Total-mass bounds */
  if (Cube[0] + Cube[1] < prior->mtot_min || Cube[0] + Cube[1] > prior->mtot_max)
    return 1;
  /* Always enforce qmax limit - limit of validity for the EOBNRv2HMROM model.
   * FIX: dropped the duplicated "Cube[0] < Cube[1]" test - that case already
   * returned at the first check above. */
  if (Cube[0] / Cube[1] > prior->qmax)
    return 1;
  return 0;
}
/* For the mass parameters (first two cube parameters) Mchirp/eta - uses only Mchirp, eta constraints */
/* Check mass parameters (Cube[0]=m1, Cube[1]=m2) against Mchirp/eta prior bounds.
 * Returns 1 if the point is outside the prior, 0 if it is acceptable. */
int PriorBoundaryCheckMchirpeta(LISAPrior *prior, double *Cube)
{
  /* Cube stores the physical masses m1, m2 - convert to chirp mass and
   * symmetric mass ratio */
  double mass1 = Cube[0];
  double mass2 = Cube[1];
  double Mc = Mchirpofm1m2(mass1, mass2);
  double nu = etaofm1m2(mass1, mass2);
  /* eta > 1/4 is unphysical */
  if (nu > 0.25)
    return 1;
  /* Chirp-mass and eta bounds */
  if (Mc < prior->Mchirp_min || Mc > prior->Mchirp_max || nu < prior->eta_min || nu > prior->eta_max)
    return 1;
  /* Always enforce qmax limit - limit of validity for the EOBNRv2HMROM model */
  double delta = sqrt(1-4.*nu);
  if ((1.+delta)/(1.-delta) > prior->qmax)
    return 1;
  return 0;
}
/* Utility prior functions to convert from Cube to common distributions, and back */
/* Map a unit-cube coordinate r in [0,1] to a flat (uniform) prior on [x1,x2]. */
double CubeToFlatPrior(double r, double x1, double x2)
{
  double span = x2 - x1;
  return x1 + r * span;
}
/* Inverse of CubeToFlatPrior: map y in [x1,x2] back to the unit cube. */
double FlatPriorToCube(double y, double x1, double x2)
{
  double offset = y - x1;
  double span = x2 - x1;
  return offset / span;
}
double CubeToLogFlatPrior(double r, double x1, double x2)
{
return exp(log(x1) + r * (log(x2) - log(x1)));
}
double LogFlatPriorToCube(double y, double x1, double x2)
{
return (log(y) - log(x1)) / (log(x2) - log(x1));
}
double CubeToPowerPrior(double p, double r, double x1, double x2)
{
double pp = p + 1.0;
return pow(r * pow(x2, pp) + (1.0 - r) * pow(x1, pp), 1.0 / pp);
}
double PowerPriorToCube(double p, double y, double x1, double x2)
{
double pp = p + 1.0;
return (pow(y, pp) - pow(x1, pp)) / (pow(x2, pp) - pow(x1, pp));
}
double CubeToSinPrior(double r, double x1, double x2)
{
return acos((1.0-r)*cos(x1)+r*cos(x2));
}
double SinPriorToCube(double y, double x1, double x2) /* Note: on [0,pi] cos(x1)>cos(y)>cos(x2), not important */
{
return (cos(x1) - cos(y))/(cos(x1) - cos(x2));
}
double CubeToCosPrior(double r, double x1, double x2)
{
return asin((1.0-r)*sin(x1)+r*sin(x2));
}
double CosPriorToCube(double y, double x1, double x2) /* Note: on [-pi/2,pi/2] normally sin(x1)<sin(y)<sin(x2), not important */
{
return (sin(y) - sin(x1))/(sin(x2) - sin(x1));
}
/* Map a unit-cube coordinate r in [0,1] to a Gaussian prior N(mean, sigma)
 * via the inverse CDF (quantile) of the zero-mean Gaussian, then shift. */
double CubeToGaussianPrior(double r, double mean, double sigma)
{
return gsl_cdf_gaussian_Pinv(r,sigma) + mean;
}
|
convolutiondepthwise_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1 (scalar x86 path).
// Each input channel g is convolved with its own 3x3 kernel (kernel + g*9)
// plus an optional per-channel bias, producing one output channel.
// The main loop computes two output rows per iteration so the middle input
// rows (r1, r2) are reused for both; a tail loop handles an odd last row.
// Assumes top_blob is pre-sized to outw x outh x group with no padding here.
static void convdw3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
// One channel (group) per loop iteration, parallelized across threads
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*9;
// Two output-row cursors for the 2-rows-at-a-time loop
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(g);
// Four consecutive input rows feed two output rows
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
// The three rows of the 3x3 kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i+1 < outh; i+=2)
{
int remain = outw;
for (; remain>0; remain--)
{
// Output row i: rows r0..r2 against kernel rows k0..k2
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
// Output row i+1: rows r1..r3 against the same kernel
float sum2 = bias0;
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
// Skip the 2 border columns plus one full row (two output rows consumed)
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
// Jump the output cursors past the row the partner cursor just wrote
outptr += outw;
outptr2 += outw;
}
// Tail: single remaining output row when outh is odd
for (; i < outh; i++)
{
int remain = outw;
for (; remain>0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
r0++;
r1++;
r2++;
outptr++;
}
// Skip the 2 border columns at the end of the row
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
// Depthwise 3x3 convolution, stride 2 (scalar x86 path).
// Same per-group layout as convdw3x3s1_sse, but input pointers advance by 2
// per output pixel; tailstep moves them from the end of one output row to the
// start of the next (the input window moves down 2 rows per output row).
static void convdw3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
// Horizontal leftover of the current row plus one full row (stride 2 vertically)
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*9;
float* outptr = out;
const float* img0 = bottom_blob.channel(g);
// Three consecutive input rows feed one output row
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
// The three rows of the 3x3 kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i < outh; i++)
{
int remain = outw;
for (; remain>0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
// Stride 2: advance the input window two columns per output pixel
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
omp_directive.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/**
 * Notes on OpenMP directive usage:
 * parallel, for, sections, single, master, threadprivate
 *
 * For a detailed introduction see http://blog.zhangjikai.com/tags/OpenMP/
 */
/* Demonstrates #pragma omp parallel: every thread in the team executes the
 * region; thread number 2 additionally takes a special branch. */
void omp_parallel() {
#pragma omp parallel
{
printf("The parallel region is executed by thread %d\n",
omp_get_thread_num());
if ( omp_get_thread_num() == 2 ) {
printf(" Thread %d does things differently\n",
omp_get_thread_num());
}
}
}
/* Demonstrates #pragma omp parallel for: the n iterations are divided among
 * the team; n is shared, the loop index i is private to each thread. */
void omp_for() {
int n = 9, i;
#pragma omp parallel for shared(n) private(i)
for(i = 0; i < n; i++) {
printf("Thread %d executes loop iteration %d\n", omp_get_thread_num(),i);
}
}
/**
 * Use #pragma omp sections together with #pragma omp section to have different
 * threads execute different tasks.
 * If there are more threads than sections, the extra threads stay idle.
 * If there are fewer threads than sections, one thread executes several sections.
 */
/* Prints which thread runs this task (used by the omp_sections demo). */
void funcA() {
printf("In funcA: this section is executed by thread %d\n",
omp_get_thread_num());
}
/* Prints which thread runs this task (used by the omp_sections demo). */
void funcB() {
printf("In funcB: this section is executed by thread %d\n",
omp_get_thread_num());
}
/* Demonstrates sections: funcA and funcB may be executed by different threads. */
void omp_sections() {
#pragma omp parallel sections
{
#pragma omp section
{
(void)funcA();
}
#pragma omp section
{
(void)funcB();
}
}
}
/* Demonstrates single: exactly one thread executes the block, and an implicit
 * barrier at its end makes the other threads wait before continuing. */
void omp_single() {
#pragma omp parallel
{
// Only one thread executes this block; the other threads wait until it finishes
#pragma omp single
{
printf("Single construct executed by thread %d\n\n", omp_get_thread_num());
}
// A barrier is automatically inserted here
printf("thread %d is running\n", omp_get_thread_num());
}
}
/* Demonstrates master: only the master thread executes the block. Unlike
 * single, no barrier is inserted automatically, so we synchronize explicitly. */
void omp_master() {
#pragma omp parallel
{
// Executed by the master thread only; no implicit barrier, manual sync needed
#pragma omp master
{
printf("master construct executed by thread %d\n\n", omp_get_thread_num());
}
#pragma omp barrier
printf("thread %d is running\n", omp_get_thread_num());
}
}
// Each thread keeps its own persistent copy of `counter` across parallel regions
int counter = 10;
#pragma omp threadprivate(counter)
/* Demonstrates threadprivate + copyin: on entry to the parallel region every
 * thread's copy of counter is initialized from the master's value; after the
 * region the master's (possibly updated) copy is what the serial code sees. */
void omp_threadprivate() {
printf("counter is %d\n", counter);
#pragma omp parallel copyin(counter)
{
counter = omp_get_thread_num() + counter + 1;
printf("thread %d : counter is %d\n", omp_get_thread_num(), counter);
}
printf("counter is %d\n", counter);
}
/* Driver: uncomment one of the calls below to run the corresponding demo. */
int main() {
// omp_parallel();
// omp_for();
// omp_sections();
// omp_single();
// omp_master();
omp_threadprivate();
}
|
GB_unaryop__minv_int16_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int16_bool
// op(A') function: GB_tran__minv_int16_bool
// C type: int16_t
// A type: bool
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
bool
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary op entrywise: Cx [p] = GB_IMINV_SIGNED ((int16_t) Ax [p])
// for all anz entries, i.e. typecast bool -> int16_t then take the integer
// multiplicative inverse. Entries are independent, so the loop is split
// statically across nthreads OpenMP threads. Returns GrB_NO_VALUE when the
// operator/types are compiled out via GB_DISABLE.
GrB_Info GB_unop__minv_int16_bool
(
int16_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: cast Ax [p], apply the op, store in Cx [p]
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose with typecast and unary op applied.
// All of the work is done by the included GB_unaryop_transpose.c template,
// specialised by the GB_* macros defined at the top of this file.
GrB_Info GB_tran__minv_int16_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_fp64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_uint64
// op(A') function: GB_tran__lnot_fp64_uint64
// C type: double
// A type: uint64_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary op entrywise: Cx [p] = !(((double) Ax [p]) != 0) for all
// anz entries (logical NOT after typecast uint64_t -> double). Entries are
// independent, so the loop is split statically across nthreads OpenMP
// threads. Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_fp64_uint64
(
double *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: cast Ax [p], apply the op, store in Cx [p]
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose with typecast and unary op applied, via the
// GB_unaryop_transpose.c template specialised by the macros above.
// NOTE(review): Rowcounts is declared `int64_t **` here but
// `int64_t *restrict *` in sibling generated files — presumably a generator
// version difference; confirm against the prototype in GB_unaryop__include.h.
GrB_Info GB_tran__lnot_fp64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
}
#endif
|
GB_binop__isge_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_fp32)
// A*D function (colscale): GB (_AxD__isge_fp32)
// D*A function (rowscale): GB (_DxB__isge_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_fp32)
// C=scalar+B GB (_bind1st__isge_fp32)
// C=scalar+B' GB (_bind1st_tran__isge_fp32)
// C=A+scalar GB (_bind2nd__isge_fp32)
// C=A'+scalar GB (_bind2nd_tran__isge_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_FP32 || GxB_NO_ISGE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for ISGE: the dense C += A+B kernel is only generated for
// accumulate-capable operators, so this stub is compiled out.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the included template applies
// cij = (aij >= bij) entrywise using nthreads OpenMP threads.
void GB (_Cdense_ewise3_noaccum__isge_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: accumulate via the subassign-23
// template, with the work pre-sliced into B_ntasks tasks (B_ek_slicing) run
// on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__isge_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed type-erased through
// p_bwork and unwrapped to float here).
GrB_Info GB (_Cdense_accumb__isge_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block above has already returned (generated code)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: D is diagonal, so cij = (aij >= d_jj); the colscale
// template iterates A with the precomputed A_ek_slicing task partition.
GrB_Info GB (_AxD__isge_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: D is diagonal, so cij = (d_ii >= bij); the rowscale
// template iterates B with nthreads OpenMP threads.
GrB_Info GB (_DxB__isge_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (set union of patterns), optionally masked by M (value
// or structural, possibly complemented). When is_eWiseUnion is true, missing
// entries of A/B are substituted by the alpha/beta scalars; otherwise those
// scalars are never read. All work is done by GB_add_template.c, driven by
// the precomputed TaskList of C_ntasks tasks on C_nthreads threads.
GrB_Info GB (_AaddB__isge_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
// only read the scalars for eWiseUnion; for eWiseAdd they are unused
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (set intersection of patterns) where C is
// sparse or hypersparse, with optional (possibly complemented) mask M.
// The TaskList partitions C's pattern into C_ntasks tasks.
GrB_Info GB (_AemultB_08__isge_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C = A.*B when A is sparse/hyper and B is bitmap/full,
// so C's pattern is a subset of A's. flipxy is only honored when the binop
// has no flipped variant (GB_BINOP_FLIP); ISGE does have one, so the
// flipped case has been rewritten by the caller and GB_FLIPPED is 0 here.
GrB_Info GB (_AemultB_02__isge_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B when M is sparse/hyper and both A and B
// are bitmap/full, so C's pattern follows M's (sliced via M_ek_slicing).
GrB_Info GB (_AemultB_04__isge_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when C is bitmap: C = A.*B with optional (possibly complemented)
// mask M; M's slicing drives the masked cases, C_nthreads the unmasked one.
GrB_Info GB (_AemultB_bitmap__isge_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Bind the scalar as the first operand: Cx [p] = (x >= Bx [p]) for all bnz
// entries. Bb is B's bitmap (entries with Bb [p] == 0 are skipped; GBB
// treats a NULL Bb as all-present). Arguments are type-erased GB_void
// pointers, unwrapped to float here.
GrB_Info GB (_bind1st__isge_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Bind the scalar as the second operand: Cx [p] = (Ax [p] >= y) for all anz
// entries. Ab is A's bitmap (entries with Ab [p] == 0 are skipped; GBB
// treats a NULL Ab as all-present).
GrB_Info GB (_bind2nd__isge_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = (x >= aij), where
// the scalar x is the bound first operand (no typecasting, despite the name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A while applying the scalar-bound operator, via
// GB_unop_transpose.c with per-thread Workspaces and the A_slice partition.
GrB_Info GB (_bind1st_tran__isge_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function (generated pattern)
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = (aij >= y), where
// the scalar y is the bound second operand (no typecasting, despite the name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A while applying the scalar-bound operator, via
// GB_unop_transpose.c with per-thread Workspaces and the A_slice partition.
GrB_Info GB (_bind2nd_tran__isge_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
NeuralNetwork_OMP_CPU5.c | /* NEURAL NETWORK OMP CPU5.c
* by Lut99
*
* Created:
* 4/18/2020, 11:25:46 PM
* Last edited:
* 19/11/2020, 17:19:00
* Auto updated?
* Yes
*
* Description:
* The NeuralNetwork class implements a matrix-based Feedforward Neural
* Network which is hardcoded to use Mean Squared Error for cost function and
* sigmoid as activation function.
*
* This file implements the fifth of eight different OpenMP-optimised
* versions for the CPU. It optimises the forward pass only using threads for
* the outer loops and SIMD for the inner loops.
**/
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include "NeuralNetwork.h"
#define WEIGHTS_MIN -3.0
#define WEIGHTS_MAX 3.0
#define BIAS_MIN -3.0
#define BIAS_MAX 3.0
#define NUM_THREADS 16
/***** OPTIONAL PARAMETERS *****/
static unsigned int n_threads = 16;
/***** OPENMP DECLARATIONS *****/
// Prototypes matching the OpenMP runtime (see <omp.h>): omp_set_num_threads
// returns void, so calling it through the previous `extern int f()`
// declaration was undefined behavior (incompatible function type). Full
// prototypes also restore argument type checking at the call sites.
extern void omp_set_num_threads(int num_threads);
extern int omp_get_num_procs(void);
extern int omp_get_thread_num(void);
/***** HELPER FUNCTIONS *****/
#define TIMEVAL_TO_MS(T_START, T_END) (((T_END.tv_sec - T_START.tv_sec) * 1000000 + (T_END.tv_usec - T_START.tv_usec)) / 1000000.0)
extern size_t max(size_t length, const size_t* list);
/***** NEURAL NETWORK OPERATIONS *****/
/* Trains the network with full-batch gradient descent for exactly
 * n_iterations. Per iteration: (1) a forward pass, parallelised over samples
 * (samples are independent) with SIMD inner products; (2) a serial backward
 * pass accumulating bias/weight deltas over all samples; (3) a weight/bias
 * update parallelised over layers, which also resets the accumulators.
 * inputs[s] / expected[s] are arrays of nodes_per_layer[0] /
 * nodes_per_layer[n_layers-1] doubles; inputs are linked, never copied.
 * NOTE(review): malloc results are unchecked, and layer_outputs is a VLA of
 * n_samples*n_layers pointers on the stack — assumes modest n_samples. */
void nn_train(neural_net* nn, size_t n_samples, double** inputs, double** expected, double learning_rate, size_t n_iterations) {
#ifdef BENCHMARK
// Declare all timers
struct timeval s_total, e_total, s_iters, e_iters, s_fwd, e_fwd, s_bck_out, e_bck_out, s_bck_hid, e_bck_hid, s_upd, e_upd;
// Set some shortcuts for the timers
size_t half_iters = n_iterations / 2;
size_t half_samples = n_samples / 2;
// Start the total timer
gettimeofday(&s_total, NULL);
#endif
// Also obtain links to all biases / matrices
double** biases = nn->biases;
double** weights = nn->weights;
// Make some shortcuts for the number-of-nodes information
size_t n_layers = nn->n_layers;
size_t* nodes_per_layer = nn->nodes_per_layer;
// Initialize the temporary delta memory to the correct size
// (max(...) returns the largest entry of nodes_per_layer, so both buffers
// can hold the deltas of any single layer)
double* deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer));
double* prev_deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer));
// Create a list that is used to store intermediate outputs. The first input layer (=first column)
// is linked and not copied to the input data
double* layer_outputs[n_samples][n_layers];
for (size_t s = 0; s < n_samples; s++) {
// Link the input layer
layer_outputs[s][0] = inputs[s];
// Allocate arrays for the other layers
for (size_t l = 1; l < n_layers; l++) {
layer_outputs[s][l] = malloc(sizeof(double) * nodes_per_layer[l]);
}
}
// Create the delta_biases and delta_weights arrays / matrices
// (one accumulator per weight layer; NOTE(review): assumes
// nn->n_weights == n_layers - 1, matching the cleanup loop below)
double* delta_biases[nn->n_weights];
double* delta_weights[nn->n_weights];
for(size_t l = 0; l < nn->n_weights; l++) {
delta_biases[l] = malloc(sizeof(double) * nodes_per_layer[l + 1]);
delta_weights[l] = malloc(sizeof(double) * nodes_per_layer[l] * nodes_per_layer[l + 1]);
// Fill with zeros
for (size_t n = 0; n < nodes_per_layer[l + 1]; n++) {
delta_biases[l][n] = 0;
for (size_t prev_n = 0; prev_n < nodes_per_layer[l]; prev_n++) {
delta_weights[l][prev_n * nodes_per_layer[l + 1] + n] = 0;
}
}
}
#ifdef BENCHMARK
// Start the iterations timer
gettimeofday(&s_iters, NULL);
#endif
// Perform the training for n_iterations (always)
for (size_t i = 0; i < n_iterations; i++) {
// Forward pass over all samples in parallel: each sample only touches its
// own layer_outputs[s] row, so no synchronisation is needed.
#pragma omp parallel for schedule(static)
for (size_t s = 0; s < n_samples; s++) {
/***** FORWARD PASS *****/
#ifdef BENCHMARK
// Start the forward pass timer
// (only the thread that owns sample half_samples writes s_fwd/e_fwd)
if (i == half_iters && s == half_samples) {
gettimeofday(&s_fwd, NULL);
}
#endif
// sample_outputs is a 2D flattened array for this layer
double** sample_outputs = layer_outputs[s];
// Iterate over each layer to feedforward through the network
for (size_t l = 1; l < n_layers; l++) {
// Get some references to the bias list, weight matrix and outputs of the previous and this layer
double* bias = biases[l - 1];
double* weight = weights[l - 1];
double* prev_output = sample_outputs[l - 1];
double* output = sample_outputs[l];
// Compute the activation for each node on this layer
size_t this_nodes = nodes_per_layer[l];
size_t prev_nodes = nodes_per_layer[l - 1];
for (size_t n = 0; n < this_nodes; n++) {
// Sum the weighted inputs for this node
double z = bias[n];
#pragma omp simd
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
z += prev_output[prev_n] * weight[prev_n * this_nodes + n];
}
// Run the activation function over this input and store it in the output
// (sigmoid, hardcoded)
output[n] = 1 / (1 + exp(-z));
}
}
#ifdef BENCHMARK
// End the forward timer, start the backward pass output timer
if (i == half_iters && s == half_samples) {
gettimeofday(&e_fwd, NULL);
}
#endif
}
/***** BACKWARD PASS *****/
// Implementation: https://towardsdatascience.com/simple-neural-network-implementation-in-c-663f51447547
// Loop through all samples to compute the backward cost
// (serial: delta_biases/delta_weights are shared accumulators)
size_t last_nodes = nodes_per_layer[n_layers - 1];
size_t last_prev_nodes = nodes_per_layer[n_layers - 2];
double* last_delta_bias = delta_biases[n_layers - 2];
double* last_delta_weight = delta_weights[n_layers - 2];
for (size_t s = 0; s < n_samples; s++) {
#ifdef BENCHMARK
// End the forward timer, start the backward pass output timer
if (i == half_iters && s == half_samples) {
gettimeofday(&s_bck_out, NULL);
}
#endif
// Backpropagate the error from the last layer to the first.
// prev_deltas always holds the deltas of the layer AFTER the one being
// processed; the two buffers are swapped at the end of each layer.
double** sample_outputs = layer_outputs[s];
double* sample_expected = expected[s];
// Do the output layer: compute the deltas
// (delta = (t - y) * y * (1 - y), i.e. MSE x sigmoid derivative)
double* output = sample_outputs[n_layers - 1];
#pragma omp simd
for (size_t n = 0; n < last_nodes; n++) {
double output_val = output[n];
prev_deltas[n] = (sample_expected[n] - output_val) * output_val * (1 - output_val);
}
// Do the output layer: compute the bias & weight updates
// Add all deltas as delta_biases for this layer
#pragma omp simd
for (size_t n = 0; n < last_nodes; n++) {
last_delta_bias[n] += prev_deltas[n];
}
// Same for all the weights, except we compute the delta_weights first
double* last_prev_output = sample_outputs[n_layers - 2];
for (size_t prev_n = 0; prev_n < last_prev_nodes; prev_n++) {
#pragma omp simd
for (size_t n = 0; n < last_nodes; n++) {
last_delta_weight[prev_n * last_nodes + n] += last_prev_output[prev_n] * prev_deltas[n];
}
}
#ifdef BENCHMARK
// End the backward pass output timer, start the backward pass hidden timer
if (i == half_iters && s == half_samples) {
gettimeofday(&e_bck_out, NULL);
gettimeofday(&s_bck_hid, NULL);
}
#endif
// Then, the rest of the hidden layers
for (size_t l = n_layers - 2; l > 0; l--) {
double* delta_bias = delta_biases[l - 1];
double* delta_weight = delta_weights[l - 1];
double* output = sample_outputs[l];
double* prev_output = sample_outputs[l - 1];
size_t next_nodes = nodes_per_layer[l + 1];
size_t this_nodes = nodes_per_layer[l];
size_t prev_nodes = nodes_per_layer[l - 1];
// Loop through all nodes in this layer to compute their deltas by summing all deltas of the next layer in a weighted fashion
double* weight_next = weights[l];
for (size_t n = 0; n < this_nodes; n++) {
// Take the weighted sum of all connection of that node with this layer
double error = 0;
#pragma omp simd
for (size_t next_n = 0; next_n < next_nodes; next_n++) {
error += prev_deltas[next_n] * weight_next[n * next_nodes + next_n];
}
// Multiply the error with the derivative of the activation function to find the result
double output_val = output[n];
deltas[n] = error * output_val * (1 - output_val);
}
// Add all deltas as delta_biases for this layer
#pragma omp simd
for (size_t n = 0; n < this_nodes; n++) {
delta_bias[n] += deltas[n];
}
// Same for all the weights, except we compute the delta_weights first
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
#pragma omp simd
for (size_t n = 0; n < this_nodes; n++) {
delta_weight[prev_n * this_nodes + n] += prev_output[prev_n] * deltas[n];
}
}
// Swap the two deltas
double* temp = deltas;
deltas = prev_deltas;
prev_deltas = temp;
}
#ifdef BENCHMARK
// End the backward pass hidden timer
if (i == half_iters && s == half_samples) {
gettimeofday(&e_bck_hid, NULL);
}
#endif
}
#ifdef BENCHMARK
// Start the updates timer
if (i == half_iters) {
gettimeofday(&s_upd, NULL);
}
#endif
// Actually update the weights, and reset the delta updates to 0 for next iteration
// (parallel over layers: each l owns disjoint bias/weight arrays)
#pragma omp parallel for schedule(static)
for (size_t l = 0; l < nn->n_weights; l++) {
double* bias = biases[l];
double* delta_bias = delta_biases[l];
double* weight = weights[l];
double* delta_weight = delta_weights[l];
// Update the biases & reset delta_biases
size_t this_nodes = nodes_per_layer[l + 1];
#pragma omp simd
for (size_t n = 0; n < this_nodes; n++) {
bias[n] += delta_bias[n] * learning_rate;
delta_bias[n] = 0;
}
// Update the weights & reset delta_weights
size_t prev_nodes = nodes_per_layer[l];
// note: this inner 'i' shadows the iteration counter, but goes out of
// scope before the BENCHMARK check below, which sees the outer 'i'
#pragma omp simd
for (size_t i = 0; i < this_nodes * prev_nodes; i++) {
weight[i] += delta_weight[i] * learning_rate;
delta_weight[i] = 0;
}
}
#ifdef BENCHMARK
// Stop the updates timer
if (i == half_iters) {
gettimeofday(&e_upd, NULL);
}
#endif
}
#ifdef BENCHMARK
// End the iterations timer
gettimeofday(&e_iters, NULL);
#endif
// Cleanup
// Free the delta biases / weights
for(size_t l = 0; l < n_layers - 1; l++) {
free(delta_biases[l]);
free(delta_weights[l]);
}
// Free the layer_outputs (skip the first, as these merely link the input rather than copy 'em)
for (size_t s = 0; s < n_samples; s++) {
for (size_t l = 1; l < n_layers; l++) {
free(layer_outputs[s][l]);
}
}
// Cleanup the deltas
free(deltas);
free(prev_deltas);
#ifdef BENCHMARK
// End the total timer
gettimeofday(&e_total, NULL);
// Print the results
printf("%f\n", TIMEVAL_TO_MS(s_total, e_total));
printf("%f\n", TIMEVAL_TO_MS(s_iters, e_iters));
printf("%f\n", TIMEVAL_TO_MS(s_fwd, e_fwd));
printf("%f\n", TIMEVAL_TO_MS(s_bck_out, e_bck_out));
printf("%f\n", TIMEVAL_TO_MS(s_bck_hid, e_bck_hid));
printf("%f\n", TIMEVAL_TO_MS(s_upd, e_upd));
#endif
}
/***** OTHER TOOLS *****/
/* Parses the optional command-line arguments for this variation and applies
 * them. argv[0] (if present) is the desired number of OpenMP threads; on a
 * missing, malformed, or out-of-range value the compiled-in default is kept.
 * Always calls omp_set_num_threads with the resulting count. */
void parse_opt_args(int argc, char** argv) {
    // Parse and set number of threads as first argument
    if (argc >= 1) {
        // strtol instead of atoi so that garbage or out-of-range input can be
        // detected: atoi silently returns 0, and omp_set_num_threads(0) is
        // invalid per the OpenMP spec.
        char* end = NULL;
        long parsed = strtol(argv[0], &end, 10);
        if (end != argv[0] && *end == '\0' && parsed > 0 && parsed <= INT_MAX) {
            n_threads = (unsigned int) parsed;
        }
        // otherwise: keep the default value of n_threads
    }
    omp_set_num_threads(n_threads);
}
void print_opt_args() {
    // Report which implementation variant this is and its thread count.
    fputs(" - Variation : OpenMP CPU 5 (Forward only, with SIMD)\n", stdout);
    printf(" - Number of threads : %u\n", n_threads);
}
|
regularisation.h | /* Incremental diffusion regularisation of parametrised transformation
using (globally optimal) belief-propagation on minimum spanning tree.
Fast distance transform uses squared differences.
Similarity cost for each node and label has to be given as input.
*/
/* One message-computation step of the MST belief propagation: computes the
 * lower envelope (min-convolution with squared distances) of node `ind`'s
 * cost cube data[ind*len3 ..] over the three displacement axes, as three
 * separable 1D passes (y, then x, then z). The result overwrites the cube
 * in-place, and indout[ind*len3 + i] records the flattened index of the
 * minimising source label for backtracking. offsetx/y/z shift the parabola
 * centres by the parent-child flow difference.
 * NOTE(review): the buffers are VLAs of len1^3 floats/ints on the stack —
 * assumes len1 stays small (a typical search-window width); confirm. */
void messageDT(int ind,float* data,short* indout,int len1,float offsetx,float offsety,float offsetz){
//int ind1=get_global_id(0)+start;
// int ind=ordered[ind1];
int len2=len1*len1;
int len3=len1*len1*len1;
float z[len1*2+1];
float* val;
float* valout;
short* indo;
float* valb;
float* valb2;
float buffer[len3];
float buffer2[len3];
int* indb;
int* indb2;
int bufferi[len3];
int bufferi2[len3];
// Pass 1 (y axis): squared-distance table shifted by offsety.
// (This loop fills len1*2+1 entries; the later passes refill only len1*2,
// which suffices since the largest index ever read is 2*len1-1.)
for(int i=0;i<len1*2+1;i++){
z[i]=(i-len1+offsety)*(i-len1+offsety);
}
// For every (x,z) column, brute-force the 1D min-convolution along y,
// reading from data and writing value/argmin into buffer/bufferi.
for(int k1=0;k1<len1;k1++){
for(int j1=0;j1<len1;j1++){
//valb=buffer2+(j1*len1+k1*len1*len1);//
val=data+ind*len3+(j1*len1+k1*len1*len1);
valb2=buffer+(j1*len1+k1*len1*len1);
indb=bufferi+(j1*len1+k1*len1*len1);
int num=(j1*len1+k1*len1*len1);
for(int i=0;i<len1;i++){
float minval=val[0]+z[i+len1];
int minind=0;
for(int j=0;j<len1;j++){
bool b=(val[j]+z[i-j+len1]<minval);
minval=b?val[j]+z[i-j+len1]:minval;
minind=b?j:minind;
}
valb2[i]=minval;
// store the absolute flattened index of the minimiser
indb[i]=minind+num;
}
}
}
// Pass 2 (x axis): same sweep with stride len1, buffer -> buffer2,
// propagating the argmin indices found in pass 1.
for(int i=0;i<len1*2;i++){
z[i]=(i-len1+offsetx)*(i-len1+offsetx);
}
for(int k1=0;k1<len1;k1++){
for(int i1=0;i1<len1;i1++){
valb=buffer+(i1+k1*len1*len1);
valb2=buffer2+(i1+k1*len1*len1);
indb=bufferi+(i1+k1*len1*len1);
indb2=bufferi2+(i1+k1*len1*len1);
for(int i=0;i<len1;i++){
float minval=valb[0]+z[i+len1];
int minind=0;
for(int j=0;j<len1;j++){
bool b=(valb[j*len1]+z[i-j+len1]<minval);
minval=b?valb[j*len1]+z[i-j+len1]:minval;
minind=b?j:minind;
}
valb2[i*len1]=minval;
indb2[i*len1]=indb[minind*len1];
}
}
}
// Pass 3 (z axis): stride len2, writing the final envelope back into data
// and the final argmin chain into indout.
for(int i=0;i<len1*2;i++){
z[i]=(i-len1+offsetz)*(i-len1+offsetz);
}
for(int j1=0;j1<len1;j1++){
for(int i1=0;i1<len1;i1++){
valb=buffer2+(i1+j1*len1);
//valb2=buffer+(i1+j1*len1);
valout=data+ind*len3+(i1+j1*len1);
indb=bufferi2+(i1+j1*len1);
//indb2=bufferi+(i1+j1*len1);
indo=indout+ind*len3+(i1+j1*len1);
for(int i=0;i<len1;i++){
float minval=valb[0]+z[i+len1];
int minind=0;
for(int j=0;j<len1;j++){
bool b=(valb[j*len2]+z[i-j+len1]<minval);
minval=b?valb[j*len2]+z[i-j+len1]:minval;
minind=b?j:minind;
}
valout[i*len2]=minval;
indo[i*len2]=indb[minind*len2];
}
}
}
}
//------------------------------------------------------------------------------
// regularisationCL: regularise a dense displacement cost volume over a
// minimum spanning tree (bottom-up message passing with a distance transform,
// then top-down selection of the best displacement per node).
//
// costall  - per-node cost volume, sz*len2 entries (modified in place)
// u0/v0/w0 - previous deformation field (input)
// u1/v1/w1 - updated deformation field (output)
// hw       - search half-width; labels span (2*hw+1)^3 displacements
// step1    - grid step; the working grid is (image_m/step1) x ... x (image_o/step1)
// quant    - displacement quantisation step
// ordered  - MST node ordering (root first); parents - parent of each node
// edgemst  - per-node MST edge weight applied to the child cost
//
// Fixes vs. previous version: all new[] allocations are now released with
// delete[] (scalar delete on an array is undefined behaviour), the arrays
// numlev/startlev/xs/ys/zs are no longer leaked, and dead locals
// (vals, inds, processed, counters) were removed.
void regularisationCL(float* costall,float* u0,float* v0,float* w0,float* u1,float* v1,float* w1,int hw,int step1,float quant,int* ordered,int* parents,float* edgemst)
{
  int m = image_m / step1;
  int n = image_n / step1;
  int o = image_o / step1;
  int sz = m * n * o;                 // number of grid points
  int len = hw * 2 + 1;               // label width per axis
  int len1 = len;
  int len2 = len * len * len;         // labels per grid point
  int len3 = len2;
  timeval time1, time2;
  gettimeofday(&time1, NULL);
  // Per-node argmin indices filled by messageDT and read in the top-down pass.
  // NOTE(review): 'short' overflows once len2 > 32767 (hw > 15) — confirm hw
  // stays small in all callers.
  short* allinds = new short[(size_t)sz * (size_t)len2];
  float* cost1 = new float[len2];
  // Compute tree levels so nodes of one level can be processed in parallel.
  int* levels = new int[sz];
  for (int i = 0; i < sz; i++) {
    levels[i] = 0;
  }
  for (int i = 1; i < sz; i++) {
    int ochild = ordered[i];
    int oparent = parents[ordered[i]];
    levels[ochild] = levels[oparent] + 1;
  }
  int maxlev = 1 + *max_element(levels, levels + sz);
  int* numlev = new int[maxlev];
  int* startlev = new int[maxlev];
  for (int i = 0; i < maxlev; i++) {
    numlev[i] = 0;
  }
  for (int i = 0; i < sz; i++) {
    numlev[levels[i]]++;
  }
  startlev[0] = numlev[0];
  for (int i = 1; i < maxlev; i++) { // cumulative sum
    startlev[i] = startlev[i - 1] + numlev[i];
  }
  delete[] levels; // was: scalar delete on a new[] array (UB)
  for (int i = 0; i < len2; i++) {
    cost1[i] = 0;
  }
  float timeCopy = 0;
  float timeMessage = 0;
  // Bottom-up pass: weight each child's cost by its MST edge, run the
  // distance transform (messageDT), then fold the normalised child cost
  // into its parent.  The fold is serial to avoid simultaneous += on a
  // shared parent.
  for (int lev = maxlev - 1; lev > 0; lev--) {
    int start = startlev[lev - 1];
    int length = numlev[lev];
    gettimeofday(&time1, NULL);
    for (int i = start; i < start + length; i++) {
      int ochild = ordered[i];
      for (int l = 0; l < len2; l++) {
        costall[ochild * len2 + l] *= edgemst[ochild];
      }
    }
#pragma omp parallel for
    for (int i = start; i < start + length; i++) {
      int ochild = ordered[i];
      int oparent = parents[ordered[i]];
      float offsetx = (u0[oparent] - u0[ochild]) / (float)quant;
      float offsety = (v0[oparent] - v0[ochild]) / (float)quant;
      float offsetz = (w0[oparent] - w0[ochild]) / (float)quant;
      messageDT(ochild, costall, allinds, len1, offsetx, offsety, offsetz);
    }
    gettimeofday(&time2, NULL);
    timeMessage += time2.tv_sec + time2.tv_usec / 1e6 - (time1.tv_sec + time1.tv_usec / 1e6);
    gettimeofday(&time1, NULL);
    for (int i = start; i < start + length; i++) {
      int ochild = ordered[i];
      int oparent = parents[ordered[i]];
      float minval = *min_element(costall + ochild * len2, costall + ochild * len2 + len3);
      for (int l = 0; l < len2; l++) {
        costall[oparent * len2 + l] += (costall[ochild * len2 + l] - minval);
      }
    }
    gettimeofday(&time2, NULL);
    timeCopy += time2.tv_sec + time2.tv_usec / 1e6 - (time1.tv_sec + time1.tv_usec / 1e6);
  }
  // Dense displacement space: physical displacement for each label index.
  float* xs = new float[len2];
  float* ys = new float[len2];
  float* zs = new float[len2];
  for (int i = 0; i < len; i++) {
    for (int j = 0; j < len; j++) {
      for (int k = 0; k < len; k++) {
        xs[i + j * len + k * len * len] = (j - hw) * quant;
        ys[i + j * len + k * len * len] = (i - hw) * quant;
        zs[i + j * len + k * len * len] = (k - hw) * quant;
      }
    }
  }
  int* selected = new int[sz];
  // Root node: pick the global minimum of its aggregated cost.
  int oroot = ordered[0];
  for (int l = 0; l < len2; l++) {
    cost1[l] = costall[oroot * len2 + l];
  }
  float value = cost1[0];
  int index = 0;
  for (int l = 0; l < len2; l++) {
    if (cost1[l] < value) {
      value = cost1[l];
      index = l;
    }
    allinds[oroot * len2 + l] = l; // root's argmin map is the identity
  }
  selected[oroot] = index;
  u1[oroot] = xs[index] + u0[oroot];
  v1[oroot] = ys[index] + v0[oroot];
  w1[oroot] = zs[index] + w0[oroot];
  // Top-down pass: each child's displacement is the argmin recorded for the
  // label its parent selected; add it to the previous deformation field.
  for (int i = 1; i < sz; i++) {
    int ochild = ordered[i];
    int oparent = parents[ordered[i]];
    index = allinds[ochild * len2 + selected[oparent]];
    selected[ochild] = index;
    u1[ochild] = xs[index] + u0[ochild];
    v1[ochild] = ys[index] + v0[ochild];
    w1[ochild] = zs[index] + w0[ochild];
  }
  // Release everything (was: scalar delete, and xs/ys/zs/numlev/startlev leaked).
  delete[] cost1;
  delete[] allinds;
  delete[] selected;
  delete[] numlev;
  delete[] startlev;
  delete[] xs;
  delete[] ys;
  delete[] zs;
}
|
gmapper.c | #define _MODULE_GMAPPER
#ifndef CXXFLAGS
#define CXXFLAGS "?"
#endif
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <zlib.h>
#include <omp.h> // OMP multithreading
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <getopt.h>
#include "../gmapper/gmapper.h"
#include "../gmapper/seeds.h"
#include "../gmapper/genome.h"
#include "../gmapper/mapping.h"
#include "../common/hash.h"
#include "../common/fasta.h"
#include "../common/util.h"
#include "../common/version.h"
#include "../common/sw-full-common.h"
#include "../common/sw-full-cs.h"
#include "../common/sw-full-ls.h"
#include "../common/sw-vector.h"
#include "../common/output.h"
#include "../common/input.h"
#include "../common/read_hit_heap.h"
#include "../common/sw-post.h"
/* heaps */
/*
typedef struct ptr_and_sz {
void * ptr;
size_t sz;
} ptr_and_sz;
*/
//DEF_HEAP(uint32_t, char *, out)
DEF_HEAP(uint32_t, struct ptr_and_sz, out)
/*
Get hit stats
*/
/*
 * Edit distance of a hit's final alignment: substitutions + insertions +
 * deletions from the attached full Smith-Waterman result (matches count
 * for nothing).
 */
int
hit_edit_distance(struct read_hit * rh) {
  int dist = rh->sfrp->mismatches
    + rh->sfrp->insertions
    + rh->sfrp->deletions;
  assert(dist >= 0);
  return dist;
}
/*
* Free memory allocated by this read.
*/
/*
 * Free the string buffers owned by a read entry (name, sequence, and any
 * quality data when qualities are in use).
 */
void
read_free(struct read_entry * re)
{
  free(re->name);
  free(re->seq);
  /* orig_seq aliases seq unless the read was trimmed; free only a distinct copy */
  if (re->orig_seq != re->seq)
    free(re->orig_seq);
  if (!Qflag)
    return;
  /* quality strings only exist when qualities were loaded */
  assert(re->qual != NULL);
  free(re->qual);
  if (re->qual != re->orig_qual)
    free(re->orig_qual);
#ifdef ENABLE_LOW_QUALITY_FILTER
  if (re->filter_qual)
    free(re->filter_qual);
#endif
  assert(re->plus_line != NULL);
  free(re->plus_line);
}
/*
 * Release everything owned by a read entry: colour-space crossover scores,
 * seed mapidx arrays, hit/anchor lists, range restrictions, final unpaired
 * hits (and their alignment results), both bitfield-encoded strands, and
 * finally the string buffers via read_free().  Accounted allocations are
 * returned through my_free() against the given counter.
 */
void
read_free_full(struct read_entry * re, count_t * counter)
{
if (shrimp_mode == MODE_COLOUR_SPACE && Qflag) {
if (re->crossover_score != NULL) {
free(re->crossover_score);
re->crossover_score = NULL;
}
}
/* mapidx[0]/mapidx[1]: per-strand kmer index arrays, n_seeds * max_n_kmers each */
if (re->mapidx[0] != NULL)
my_free(re->mapidx[0], n_seeds * re->max_n_kmers * sizeof(re->mapidx[0][0]), counter, "mapidx [%s]", re->name);
if (re->mapidx[1] != NULL)
my_free(re->mapidx[1], n_seeds * re->max_n_kmers * sizeof(re->mapidx[0][0]), counter, "mapidx [%s]", re->name);
read_free_hit_list(re, counter);
read_free_anchor_list(re, counter);
if (re->n_ranges > 0)
free(re->ranges);
/* free each final hit's alignment result before the hit array itself */
if (re->n_final_unpaired_hits > 0) {
int i;
for (i = 0; i < re->n_final_unpaired_hits; i++)
free_sfrp(&re->final_unpaired_hits[i].sfrp, re, counter);
my_free(re->final_unpaired_hits, re->n_final_unpaired_hits * sizeof(re->final_unpaired_hits[0]),
counter, "final_unpaired_hits [%s]", re->name);
re->n_final_unpaired_hits = 0;
re->final_unpaired_hits = NULL;
}
/* bitfield-encoded forward and reverse-complement sequences */
free(re->read[0]);
free(re->read[1]);
read_free(re);
}
/*
 * Release everything owned by a read pair: the list of final paired hits,
 * then (for each mate) the pooled paired-hit entries — each with its
 * alignment result and cross-reference index array — and finally the two
 * read entries themselves via read_free_full().
 */
void
readpair_free_full(pair_entry * peP, count_t * counterP)
{
int nip, i;
if (peP->n_final_paired_hits > 0) {
my_free(peP->final_paired_hits, peP->n_final_paired_hits * sizeof(peP->final_paired_hits[0]),
counterP, "final_paired_hits [%s,%s]", peP->re[0]->name, peP->re[1]->name);
peP->n_final_paired_hits = 0;
peP->final_paired_hits = NULL;
}
/* nip indexes the two mates of the pair */
for (nip = 0; nip < 2; nip++) {
if (peP->final_paired_hit_pool_size[nip] > 0) {
for (i = 0; i < peP->final_paired_hit_pool_size[nip]; i++) {
/* free the hit's alignment result, then its paired-hit index array */
free_sfrp(&peP->final_paired_hit_pool[nip][i].sfrp, peP->re[nip], counterP);
if (peP->final_paired_hit_pool[nip][i].n_paired_hit_idx > 0) {
my_free(peP->final_paired_hit_pool[nip][i].paired_hit_idx, peP->final_paired_hit_pool[nip][i].n_paired_hit_idx * sizeof(peP->final_paired_hit_pool[nip][i].paired_hit_idx[0]),
counterP, "paired_hit_idx [%s]", peP->re[nip]->name);
peP->final_paired_hit_pool[nip][i].n_paired_hit_idx = 0;
peP->final_paired_hit_pool[nip][i].paired_hit_idx = NULL;
}
}
my_free(peP->final_paired_hit_pool[nip], peP->final_paired_hit_pool_size[nip] * sizeof(peP->final_paired_hit_pool[nip][0]),
counterP, "final_paired_hit_pool[%d] [%s,%s]", nip, peP->re[0]->name, peP->re[1]->name);
peP->final_paired_hit_pool_size[nip] = 0;
peP->final_paired_hit_pool[nip] = NULL;
}
}
read_free_full(peP->re[0], counterP);
read_free_full(peP->re[1], counterP);
}
/*
* Reverse read.
*
* This is useful for dealing with the various possibilities for matepair orientation in a unified way.
*/
/*
 * Reverse a read in place: swap the two strand encodings and their initial
 * bases, and flip the recorded input strand.  Used to normalise the various
 * matepair orientations.
 */
static void
read_reverse(struct read_entry * re) {
  uint32_t * seq_tmp;
  int bp_tmp;
  seq_tmp = re->read[0];
  re->read[0] = re->read[1];
  re->read[1] = seq_tmp;
  bp_tmp = re->initbp[0];
  re->initbp[0] = re->initbp[1];
  re->initbp[1] = bp_tmp;
  re->input_strand = 1 - re->input_strand;
}
/*
static uint
get_contig_number_from_name(char const * c)
{
int cn;
// hack: accept '>' for cn=0
if (*c == '>')
return 0;
for (cn = 0; cn < num_contigs; cn++) {
if (!strcmp(c, contig_names[cn]))
break;
}
return cn;
}
*/
/*
* Compute range limitations for this read.
*/
/*
static void
read_compute_ranges(struct read_entry * re)
{
char * r, * r_save, * c, * c_save;
uint g_start, g_end;
int cn, st;
assert(re->range_string != NULL);
for (r = strtok_r(re->range_string, " ", &r_save); r != NULL; r = strtok_r(NULL, " ", &r_save))
{
c = strtok_r(r, ",", &c_save); // contig name
if (c == NULL)
continue;
cn = get_contig_number_from_name(c);
if (cn >= num_contigs)
continue;
c = strtok_r(NULL, ",", &c_save); // strand
if (c == NULL)
continue;
if (*c == '+')
st = 0;
else if (*c == '-')
st = 1;
else
continue;
c = strtok_r(NULL, ",", &c_save); // g_start
if (c == NULL)
continue;
g_start = (uint)atoll(c);
if (g_start == 0)
continue;
g_start--;
c = strtok_r(NULL, ",", &c_save); // g_end
if (c == NULL)
continue;
g_end = (uint)atoll(c);
if (g_end == 0)
continue;
g_end--;
re->n_ranges++;
re->ranges = (struct range_restriction *)xrealloc(re->ranges, re->n_ranges * sizeof(re->ranges[0]));
re->ranges[re->n_ranges - 1].cn = cn;
re->ranges[re->n_ranges - 1].st = st;
re->ranges[re->n_ranges - 1].g_start = g_start;
re->ranges[re->n_ranges - 1].g_end = g_end;
}
}
*/
/* Trim a read */
/*
 * Trim trim_front leading and trim_end trailing characters from a read's
 * sequence (and quality string, when qualities are in use), preserving the
 * untrimmed originals in orig_seq / orig_qual.
 *
 * Fix: the tail was previously cleared with memset(..., 0, 17), which wrote
 * up to 17 bytes regardless of how many characters were actually trimmed —
 * a heap buffer overflow whenever trim_front + trim_end < 16.  We now zero
 * exactly the trimmed-off tail, and guard against trims longer than the read.
 */
static void trim_read(struct read_entry * re) {
  re->orig_seq = strdup(re->seq);
  if (Qflag) {
    re->orig_qual = strdup(re->qual);
  }
  //handle colour space too!
  int length = strlen(re->seq);
  int keep = length - trim_end - trim_front;
  if (keep < 0) {
    keep = 0; /* trims exceed the read length: result is the empty string */
  }
  int i;
  for (i = 0; i < keep; i++) {
    re->seq[i] = re->seq[i + trim_front];
    if (Qflag) {
      re->qual[i] = re->qual[i + trim_front];
    }
  }
  /* zero from the new end up to (but not past) the original terminator */
  memset(re->seq + i, 0, length - i);
  if (Qflag) {
    memset(re->qual + i, 0, length - i);
  }
  return;
}
/*
* Launch the threads that will scan the reads
*/
/*
 * Read-scanning driver.  Spawns num_threads OpenMP workers; each worker
 * repeatedly (1) grabs a numbered chunk of reads from the input file(s)
 * inside a critical section, (2) preprocesses and maps every read (or read
 * pair) in the chunk, writing SAM-ish output into its own growable buffer,
 * and (3) pushes the finished buffer into a shared min-heap keyed by chunk
 * number so chunks are printed to stdout in input order regardless of which
 * thread finished first.  Returns true on completion.
 *
 * NOTE(review): read_more / more_in_*_file are plain bools written inside
 * the critical section but read in the outer while condition without
 * synchronisation — presumably benign in practice, but worth confirming.
 */
static bool
launch_scan_threads(fasta_t fasta, fasta_t left_fasta, fasta_t right_fasta)
{
llint last_nreads, last_time_usecs;
bool read_more = true, more_in_left_file = true, more_in_right_file=true;
/* initiate the thread buffers */
//thread_output_buffer_sizes = (size_t *)xcalloc_m(sizeof(size_t) * num_threads, "thread_output_buffer_sizes");
thread_output_buffer_sizes = (size_t *)
my_calloc(num_threads * sizeof(size_t),
&mem_thread_buffer, "thread_output_buffer_sizes");
//thread_output_buffer_filled = (char * *)xcalloc_m(sizeof(char *) * num_threads, "thread_output_buffer_filled");
thread_output_buffer_filled = (char * *)
my_calloc(num_threads * sizeof(char *),
&mem_thread_buffer, "thread_output_buffer_filled");
//thread_output_buffer = (char * *)xcalloc_m(sizeof(char *) * num_threads, "thread_output_buffer");
thread_output_buffer = (char * *)
my_calloc(num_threads * sizeof(char *),
&mem_thread_buffer, "thread_output_buffer");
//thread_output_buffer_chunk = (unsigned int *)xcalloc_m(sizeof(unsigned int) * num_threads, "thread_output_buffer_chunk");
thread_output_buffer_chunk = (unsigned int *)
my_calloc(num_threads * sizeof(unsigned int),
&mem_thread_buffer, "thread_output_buffer_chunk");
/* chunk numbering drives in-order output: workers take increasing chunk ids,
 * and the heap below releases them strictly in id order */
unsigned int current_thread_chunk = 1;
unsigned int next_chunk_to_print = 1;
struct heap_out h;
heap_out_init(&h, thread_output_heap_capacity );
if (progress > 0) {
fprintf(stderr, "done r/hr r/core-hr\n");
last_nreads = 0;
last_time_usecs = gettimeinusecs();
}
#pragma omp parallel shared(read_more,more_in_left_file,more_in_right_file, fasta) num_threads(num_threads)
{
int thread_id = omp_get_thread_num();
struct read_entry * re_buffer;
int load, i;
//llint before, after;
//re_buffer = (struct read_entry *)xmalloc_m(chunk_size * sizeof(re_buffer[0]), "re_buffer");
re_buffer = (read_entry *)my_malloc(chunk_size * sizeof(re_buffer[0]), &mem_thread_buffer, "re_buffer");
while (read_more) {
memset(re_buffer, 0, chunk_size * sizeof(re_buffer[0]));
//before = rdtsc();
TIME_COUNTER_START(tpg.wait_tc);
//Read in this threads 'chunk'
#pragma omp critical (fill_reads_buffer)
{
//after = rdtsc();
//tpg.wait_ticks += MAX(after - before, 0);
TIME_COUNTER_STOP(tpg.wait_tc);
thread_output_buffer_chunk[thread_id]=current_thread_chunk++;
load = 0;
assert(chunk_size>2);
/* paired -1/-2 input fills two slots per iteration, hence chunk_size-1 bound */
while (read_more && ((single_reads_file && load < chunk_size) || (!single_reads_file && load < chunk_size-1))) {
//if (!fasta_get_next_with_range(fasta, &re_buffer[load].name, &re_buffer[load].seq, &re_buffer[load].is_rna,
// &re_buffer[load].range_string, &re_buffer[load].qual))
if (single_reads_file) {
if (fasta_get_next_read_with_range(fasta, &re_buffer[load])) {
load++;
} else {
read_more = false;
}
} else {
//read from the left file
if (fasta_get_next_read_with_range(left_fasta, &re_buffer[load])) {
load++;
} else {
more_in_left_file = false;
}
//read from the right file
if (fasta_get_next_read_with_range(right_fasta, &re_buffer[load])) {
load++;
} else {
more_in_right_file = false;
}
//make sure that one is not smaller then the other
if (more_in_left_file != more_in_right_file) {
fprintf(stderr,"error: when using options -1 and -2, both files specified must have the same number of entries\n");
exit(1);
}
//keep reading?
read_more = more_in_left_file && more_in_right_file;
}
}
nreads += load;
// progress reporting
if (progress > 0) {
nreads_mod += load;
if (nreads_mod >= progress) {
llint time_usecs = gettimeinusecs();
fprintf(stderr, "%lld %d %d.\r", nreads,
(int)(((double)(nreads - last_nreads)/(double)(time_usecs - last_time_usecs)) * 3600.0 * 1.0e6),
(int)(((double)(nreads - last_nreads)/(double)(time_usecs - last_time_usecs)) * 3600.0 * 1.0e6 * (1/(double)num_threads)) );
last_nreads = nreads;
last_time_usecs = time_usecs;
}
nreads_mod %= progress;
}
} // end critical section
if (pair_mode != PAIR_NONE)
assert(load % 2 == 0); // read even number of reads
/* fresh per-chunk output buffer; grows via my_realloc as output is appended */
thread_output_buffer_sizes[thread_id] = thread_output_buffer_initial;
//thread_output_buffer[thread_id] = (char *)xmalloc_m(sizeof(char) * thread_output_buffer_sizes[thread_id], "thread_buffer");
thread_output_buffer[thread_id] = (char *)
my_malloc(thread_output_buffer_sizes[thread_id] * sizeof(char),
&mem_thread_buffer, "thread_output_buffer[]");
thread_output_buffer_filled[thread_id] = thread_output_buffer[thread_id];
thread_output_buffer[thread_id][0] = '\0';
/* preprocess and map every read in this chunk */
for (i = 0; i < load; i++) {
// if running in paired mode and first foot is ignored, ignore this one, too
if (pair_mode != PAIR_NONE && i % 2 == 1 && re_buffer[i-1].ignore) {
//read_free(&re_buffer[i-1]);
read_free(&re_buffer[i]);
continue;
}
//if (!(strcspn(re_buffer[i].seq, "nNxX.") == strlen(re_buffer[i].seq))) {
// if (pair_mode != PAIR_NONE && i % 2 == 1) {
// read_free(re_buffer+i-1);
// read_free(re_buffer+i);
// }
// continue;
//}
//Trim the reads
if (trim) {
if (pair_mode != PAIR_NONE) {
if (trim_first) {
trim_read(&re_buffer[i-1]);
}
if (trim_second) {
trim_read(&re_buffer[i]);
}
} else {
trim_read(&re_buffer[i]);
}
}
/* Illumina-style trimming: drop the trailing run of 'B' quality values */
if (shrimp_mode == MODE_LETTER_SPACE && trim_illumina) {
int trailing_Bs=0;
for (int j=0; j<(int)strlen(re_buffer[i].qual); j++) {
if (re_buffer[i].qual[j]!='B') {
trailing_Bs=0;
} else {
trailing_Bs+=1;
}
}
if (trailing_Bs>0) {
re_buffer[i].seq[strlen(re_buffer[i].seq)-trailing_Bs]='\0';
re_buffer[i].qual[strlen(re_buffer[i].qual)-trailing_Bs]='\0';
}
}
//compute average quality value
re_buffer[i].read_len = strlen(re_buffer[i].seq);
if (Qflag && !ignore_qvs && min_avg_qv >= 0) {
//fprintf(stderr, "read:[%s] qual:[%s]", re_buffer[i].name, re_buffer[i].qual);
re_buffer[i].avg_qv = 0;
for (char * c = re_buffer[i].qual; *c != 0; c++) {
re_buffer[i].avg_qv += (*c - qual_delta);
}
//fprintf(stderr, " avg_qv:%d\n", re_buffer[i].avg_qv);
}
/* sanity-check the PHRED offset: qvs far outside [-10,50] suggest a wrong qual_delta */
if (Qflag && !ignore_qvs && !no_qv_check) {
for (char * c =re_buffer[i].qual; *c !=0; c++) {
int qual_value=(*c-qual_delta);
if (qual_value<-10 || qual_value>50) {
fprintf(stderr,"The qv-offset might be set incorrectly! Currenty qvs are interpreted as PHRED+%d\
and a qv of %d was observed. To disable this error, etiher set the offset correctly or disable this check (see README).\n",qual_delta,qual_value);
exit(1);
}
}
}
re_buffer[i].max_n_kmers = re_buffer[i].read_len - min_seed_span + 1;
/* encode the sequence as a bitfield; colour space loses the first colour */
re_buffer[i].read[0] = fasta_sequence_to_bitfield(fasta, re_buffer[i].seq);
if (shrimp_mode == MODE_COLOUR_SPACE) {
re_buffer[i].read_len--;
re_buffer[i].max_n_kmers -= 2; // 1st color always discarded from kmers
re_buffer[i].min_kmer_pos = 1;
re_buffer[i].initbp[0] = fasta_get_initial_base(shrimp_mode,re_buffer[i].seq);
re_buffer[i].initbp[1] = re_buffer[i].initbp[0];
re_buffer[i].read[1] = reverse_complement_read_cs(re_buffer[i].read[0], (int8_t)re_buffer[i].initbp[0],
(int8_t)re_buffer[i].initbp[1],
re_buffer[i].read_len, re_buffer[i].is_rna);
} else {
re_buffer[i].read[1] = reverse_complement_read_ls(re_buffer[i].read[0], re_buffer[i].read_len, re_buffer[i].is_rna);
}
if (re_buffer[i].max_n_kmers < 0)
re_buffer[i].max_n_kmers = 0;
if (re_buffer[i].read_len > 0)
re_buffer[i].avg_qv /= re_buffer[i].read_len;
//Check if we can actually use this read
if (//re_buffer[i].max_n_kmers <= 0
//||
re_buffer[i].read_len > longest_read_len
|| (Qflag && !ignore_qvs && re_buffer[i].avg_qv < min_avg_qv) // ignore reads with low avg qv
) {
if (re_buffer[i].max_n_kmers <= 0) {
fprintf(stderr, "warning: skipping read [%s]; smaller then any seed!\n",
re_buffer[i].name);
re_buffer[i].max_n_kmers=1;
} else if (re_buffer[i].read_len > longest_read_len) {
fprintf(stderr, "warning: skipping read [%s]; it has length %d, maximum allowed is %d. Use --longest-read ?\n",
re_buffer[i].name, re_buffer[i].read_len, longest_read_len);
} else {
//fprintf(stderr, "skipping read [%s] with avg_qv:%g\n", re_buffer[i].name, (double)re_buffer[i].avg_qv);
}
/* dropping: in paired mode an even-index drop marks the slot so the
 * mate (odd index) is dropped on the next iteration */
if (pair_mode == PAIR_NONE) {
#pragma omp atomic
total_reads_dropped++;
read_free_full(&re_buffer[i], &mem_mapping);
} else {
#pragma omp atomic
total_pairs_dropped++;
if (i%2 == 1) {
read_free_full(&re_buffer[i-1], &mem_mapping);
read_free_full(&re_buffer[i], &mem_mapping);
} else {
read_free_full(&re_buffer[i], &mem_mapping);
re_buffer[i].ignore = true;
}
}
continue;
}
re_buffer[i].window_len = (uint16_t)abs_or_pct(window_len,re_buffer[i].read_len);
// compute position-based crossover scores based on qvs
if (shrimp_mode == MODE_COLOUR_SPACE && Qflag && !ignore_qvs) {
int j;
re_buffer[i].crossover_score = (int *)xmalloc(re_buffer[i].read_len * sizeof(re_buffer[i].crossover_score[0]));
for (j = 0; j < re_buffer[i].read_len; j++) {
re_buffer[i].crossover_score[j] = (int)(score_alpha * log(pr_err_from_qv(re_buffer[i].qual[j] - qual_delta) / 3.0) / log(2.0));
if (re_buffer[i].crossover_score[j] > -1) {
re_buffer[i].crossover_score[j] = -1;
} else if (re_buffer[i].crossover_score[j] < 2*crossover_score) {
re_buffer[i].crossover_score[j] = 2*crossover_score;
}
}
}
if (re_buffer[i].range_string != NULL) {
assert(0); // not maintained
//read_compute_ranges(&re_buffer[i]);
free(re_buffer[i].range_string);
re_buffer[i].range_string = NULL;
}
//free(re_buffer[i].seq);
// time to do some mapping!
if (pair_mode == PAIR_NONE)
{
handle_read(&re_buffer[i], unpaired_mapping_options[0], n_unpaired_mapping_options[0]);
read_free_full(&re_buffer[i], &mem_mapping);
}
else if (i % 2 == 1)
{
/* paired mapping runs once per pair, at the second mate */
pair_entry pe;
memset(&pe, 0, sizeof(pe));
if (pair_reverse[pair_mode][0])
read_reverse(&re_buffer[i-1]);
if (pair_reverse[pair_mode][1])
read_reverse(&re_buffer[i]);
re_buffer[i-1].paired=true;
re_buffer[i-1].first_in_pair=true;
re_buffer[i-1].mate_pair=&re_buffer[i];
re_buffer[i].paired=true;
re_buffer[i].first_in_pair=false;
re_buffer[i].mate_pair=&re_buffer[i-1];
pe.re[0] = &re_buffer[i-1];
pe.re[1] = &re_buffer[i];
handle_readpair(&pe, paired_mapping_options, n_paired_mapping_options);
readpair_free_full(&pe, &mem_mapping);
}
}
// free unused memory while the buffer waits in the output heap
size_t new_size = (thread_output_buffer_filled[thread_id] - thread_output_buffer[thread_id]) + 1;
thread_output_buffer[thread_id] = (char *)
my_realloc(thread_output_buffer[thread_id], new_size, thread_output_buffer_sizes[thread_id],
&mem_thread_buffer, "thread_output_buffer[]");
//fprintf(stdout,"%s",thread_output_buffer[thread_id]);
/* hand the finished chunk to the ordered-output heap; print any prefix of
 * chunks that is now contiguous from next_chunk_to_print */
#pragma omp critical
{
struct heap_out_elem tmp;
tmp.key = thread_output_buffer_chunk[thread_id];
//tmp.rest = thread_output_buffer[thread_id];
tmp.rest.ptr = thread_output_buffer[thread_id];
tmp.rest.sz = new_size; //thread_output_buffer_sizes[thread_id];
thread_output_buffer[thread_id] = NULL;
heap_out_insert(&h, &tmp);
heap_out_get_min(&h, &tmp);
while (h.load > 0 && tmp.key == next_chunk_to_print) {
heap_out_extract_min(&h, &tmp);
//fprintf(stdout, "%s", tmp.rest);
fprintf(stdout, "%s", (char *)tmp.rest.ptr);
//free(tmp.rest);
my_free(tmp.rest.ptr, tmp.rest.sz,
&mem_thread_buffer, "thread_output_buffer[]");
next_chunk_to_print++;
}
}
}
//free(re_buffer);
my_free(re_buffer, chunk_size * sizeof(re_buffer[0]),
&mem_thread_buffer, "re_buffer");
} // end parallel section
if (progress > 0)
fprintf(stderr, "\n");
//assert(h.load == 0);
/* drain any chunks still queued after the workers exit */
struct heap_out_elem tmp;
while (h.load>0) {
heap_out_extract_min(&h,&tmp);
fprintf(stdout,"%s",(char *)tmp.rest.ptr);
//free(tmp.rest);
my_free(tmp.rest.ptr, tmp.rest.sz,
&mem_thread_buffer, "thread_output_buffer[]");
}
heap_out_destroy(&h);
//free(thread_output_buffer_sizes);
my_free(thread_output_buffer_sizes, sizeof(size_t) * num_threads,
&mem_thread_buffer, "thread_output_buffer_sizes");
//free(thread_output_buffer_filled);
my_free(thread_output_buffer_filled, sizeof(char *) * num_threads,
&mem_thread_buffer, "thread_output_buffer_filled");
//free(thread_output_buffer);
my_free(thread_output_buffer, sizeof(char *) * num_threads,
&mem_thread_buffer, "thread_output_buffer");
//free(thread_output_buffer_chunk);
my_free(thread_output_buffer_chunk, sizeof(unsigned int) * num_threads,
&mem_thread_buffer, "thread_output_buffer_chunk");
return true;
}
/*
 * Format a threshold into buff: absolute thresholds (stored negated) print
 * as an unsigned integer magnitude, relative ones as a percentage.
 *
 * Fix: the old code cast the negative double to uint *before* negating
 * (-(uint)(*thres)); converting a negative floating-point value to an
 * unsigned integer type is undefined behaviour (CERT FLP34-C).  Negate
 * first, then convert.
 */
static char *
thres_to_buff(char * buff, double * thres)
{
  if (IS_ABSOLUTE(*thres)) {
    sprintf(buff, "%u", (uint)(-(*thres)));
  } else {
    sprintf(buff, "%.02f%%", *thres);
  }
  return buff;
}
/*
 * Render a boolean flag into buff as the literal text "true" or "false"
 * and return buff for convenient inline use.
 */
static char *
bool_to_buff(char * buff, bool * val)
{
  const char * text = *val ? "true" : "false";
  sprintf(buff, "%s", text);
  return buff;
}
/*
 * Dump the 100-bucket paired insert-size histogram to stderr, one bucket
 * per line as "[lo-hi]: pct%".  Percentages are of total_paired_matches
 * (0% when no paired matches were recorded).
 */
static void
print_insert_histogram()
{
  int b;
  for (b = 0; b < 100; b++) {
    int lo = min_insert_size + b * insert_histogram_bucket_size;
    int hi = lo + insert_histogram_bucket_size - 1;
    double pct = 0;
    if (total_paired_matches != 0)
      pct = ((double)insert_histogram[b] / (double)total_paired_matches) * 100;
    fprintf(stderr, "[%d-%d]: %.2f%%\n", lo, hi, pct);
  }
}
/* Per-thread timing/statistics snapshot collected inside print_statistics();
 * f1 = vector SW, f2 = scalar/full SW, fwbw = post-SW forward-backward pass
 * (colour space only), per the column labels printed below. */
typedef struct tp_stats_t {
uint64_t f1_invocs, f1_cells, f1_ticks;      /* vector SW: calls, cells, raw ticks */
double f1_secs, f1_cellspersec;
uint64_t f2_invocs, f2_cells, f2_ticks;      /* scalar/full SW: calls, cells, raw ticks */
double f2_secs, f2_cellspersec;
uint64_t fwbw_invocs, fwbw_cells, fwbw_ticks; /* post-SW pass (colour space only) */
double fwbw_secs, fwbw_cellspersec;
double scan_secs, readparse_secs, read_handle_overhead_secs;
double anchor_list_secs, hit_list_secs;
double region_counts_secs, mp_region_counts_secs, duplicate_removal_secs;
double pass1_secs, get_vector_hits_secs, pass2_secs;
} tp_stats_t;
static void
print_statistics()
{
static char const my_tab[] = " ";
//uint64_t f1_invocs[num_threads], f1_cells[num_threads], f1_ticks[num_threads];
//double f1_secs[num_threads], f1_cellspersec[num_threads];
uint64_t f1_total_invocs = 0, f1_total_cells = 0;
double f1_total_secs = 0, f1_total_cellspersec = 0;
uint64_t f1_calls_bypassed = 0;
//uint64_t f2_invocs[num_threads], f2_cells[num_threads], f2_ticks[num_threads];
//double f2_secs[num_threads], f2_cellspersec[num_threads];
uint64_t f2_total_invocs = 0, f2_total_cells = 0;
double f2_total_secs = 0, f2_total_cellspersec = 0;
//uint64_t fwbw_invocs[num_threads], fwbw_cells[num_threads], fwbw_ticks[num_threads];
//double fwbw_secs[num_threads], fwbw_cellspersec[num_threads];
uint64_t fwbw_total_invocs = 0, fwbw_total_cells = 0;
double fwbw_total_secs = 0, fwbw_total_cellspersec = 0;
//double scan_secs[num_threads], readparse_secs[num_threads], read_handle_overhead_secs[num_threads];
//double anchor_list_secs[num_threads], hit_list_secs[num_threads];
//double region_counts_secs[num_threads], mp_region_counts_secs[num_threads], duplicate_removal_secs[num_threads];
//double pass1_secs[num_threads], get_vector_hits_secs[num_threads], pass2_secs[num_threads];
double total_scan_secs = 0, total_wait_secs = 0, total_readparse_secs = 0;
tp_stats_t * tps = (tp_stats_t *)malloc(num_threads * sizeof(tps[0]));
tpg_t * tpgA = (tpg_t *)malloc(num_threads * sizeof(tpgA[0]));
double hz;
double fasta_load_secs;
fasta_stats_t fs;
fs = fasta_stats();
fasta_load_secs = fs->total_secs;
free(fs);
hz = cpuhz();
#pragma omp parallel num_threads(num_threads) shared(hz)
{
int tid = omp_get_thread_num();
memcpy(&tpgA[tid], &tpg, sizeof(tpg_t));
f1_stats(&tps[tid].f1_invocs, &tps[tid].f1_cells, &tps[tid].f1_secs, NULL);
//tps[tid].f1_secs = (double)tps[tid].f1_ticks / hz;
tps[tid].f1_cellspersec = (double)tps[tid].f1_cells / tps[tid].f1_secs;
if (isnan(tps[tid].f1_cellspersec))
tps[tid].f1_cellspersec = 0;
if (shrimp_mode == MODE_COLOUR_SPACE) {
sw_full_cs_stats(&tps[tid].f2_invocs, &tps[tid].f2_cells, &tps[tid].f2_secs);
post_sw_stats(&tps[tid].fwbw_invocs, &tps[tid].fwbw_cells, &tps[tid].fwbw_secs);
} else {
sw_full_ls_stats(&tps[tid].f2_invocs, &tps[tid].f2_cells, &tps[tid].f2_secs);
tps[tid].fwbw_secs = 0;
}
//tps[tid].f2_secs = (double)tps[tid].f2_ticks / hz;
tps[tid].f2_cellspersec = (double)tps[tid].f2_cells / tps[tid].f2_secs;
if (isnan(tps[tid].f2_cellspersec))
tps[tid].f2_cellspersec = 0;
//tps[tid].fwbw_secs = (double)tps[tid].fwbw_ticks / hz;
if (shrimp_mode == MODE_COLOUR_SPACE) {
tps[tid].fwbw_cellspersec = (double)tps[tid].fwbw_cells / tps[tid].fwbw_secs;
if (isnan(tps[tid].fwbw_cellspersec))
tps[tid].fwbw_cellspersec = 0;
}
tps[tid].readparse_secs = ((double)mapping_wallclock_usecs / 1.0e6) - ((double)tpg.read_handle_usecs / 1.0e6) - time_counter_get_secs(&tpg.wait_tc);
tps[tid].scan_secs = ((double)tpg.read_handle_usecs / 1.0e6) - tps[tid].f1_secs - tps[tid].f2_secs - tps[tid].fwbw_secs;
tps[tid].scan_secs = MAX(0, tps[tid].scan_secs);
//tps[tid].anchor_list_secs = (double)tpg.anchor_list_ticks / hz;
tps[tid].anchor_list_secs = time_counter_get_secs(&tpg.anchor_list_tc);
//tps[tid].hit_list_secs = (double)tpg.hit_list_ticks / hz;
tps[tid].hit_list_secs = time_counter_get_secs(&tpg.hit_list_tc);
//tps[tid].duplicate_removal_secs = (double)tpg.duplicate_removal_ticks / hz;
tps[tid].duplicate_removal_secs = time_counter_get_secs(&tpg.duplicate_removal_tc);
//tps[tid].region_counts_secs = (double)tpg.region_counts_ticks / hz;
tps[tid].region_counts_secs = time_counter_get_secs(&tpg.region_counts_tc);
//tps[tid].mp_region_counts_secs = (double)tpg.mp_region_counts_ticks / hz;
tps[tid].mp_region_counts_secs = time_counter_get_secs(&tpg.mp_region_counts_tc);
//tps[tid].pass1_secs = (double)tpg.pass1_ticks / hz;
tps[tid].pass1_secs = time_counter_get_secs(&tpg.pass1_tc);
//tps[tid].get_vector_hits_secs = (double)tpg.get_vector_hits_ticks / hz;
tps[tid].get_vector_hits_secs = time_counter_get_secs(&tpg.get_vector_hits_tc);
//tps[tid].pass2_secs = (double)tpg.pass2_ticks / hz;
tps[tid].pass2_secs = time_counter_get_secs(&tpg.pass2_tc);
/*
tps[tid].anchor_list_secs = (double)anchor_list_usecs[tid] / 1.0e6;
tps[tid].hit_list_secs = (double)hit_list_usecs[tid] / 1.0e6;
tps[tid].duplicate_removal_secs = (double)duplicate_removal_usecs[tid] / 1.0e6;
tps[tid].region_counts_secs = (double)region_counts_usecs[tid] / 1.0e6;
*/
tps[tid].read_handle_overhead_secs = tps[tid].scan_secs
- tps[tid].region_counts_secs - tps[tid].anchor_list_secs - tps[tid].hit_list_secs - tps[tid].duplicate_removal_secs;
}
f1_stats(NULL, NULL, NULL, &f1_calls_bypassed);
fprintf(stderr, "\nStatistics:\n");
fprintf(stderr, "%sOverall:\n", my_tab);
fprintf(stderr, "%s%s%-24s" "%.2f seconds\n", my_tab, my_tab,
"Load Genome Time:", (double)load_genome_usecs / 1.0e6);
fprintf(stderr, "%s%s%-24s" "%.2f seconds\n", my_tab, my_tab,
"Read Mapping Time:", (double)mapping_wallclock_usecs / 1.0e6);
fprintf(stderr, "%s%s%-24s" "%s\n", my_tab, my_tab,
"Reads per hour:", comma_integer((int)(((double)nreads/(double)mapping_wallclock_usecs) * 3600.0 * 1.0e6)));
fprintf(stderr, "%s%s%-24s" "%s\n", my_tab, my_tab,
"Reads per core-hour:", comma_integer((int)(((double)nreads/(double)mapping_wallclock_usecs) * 3600.0 * 1.0e6 * (1/(double)num_threads))));
fprintf(stderr, "\n");
int i;
for(i = 0; i < num_threads; i++){
total_scan_secs += tps[i].scan_secs;
total_readparse_secs += tps[i].readparse_secs;
total_wait_secs += time_counter_get_secs(&tpgA[i].wait_tc);
f1_total_secs += tps[i].f1_secs;
f1_total_invocs += tps[i].f1_invocs;
f1_total_cells += tps[i].f1_cells;
f2_total_secs += tps[i].f2_secs;
f2_total_invocs += tps[i].f2_invocs;
f2_total_cells += tps[i].f2_cells;
if (shrimp_mode == MODE_COLOUR_SPACE) {
fwbw_total_secs += tps[i].fwbw_secs;
fwbw_total_invocs += tps[i].fwbw_invocs;
fwbw_total_cells += tps[i].fwbw_cells;
} else {
fwbw_total_secs = 0;
fwbw_total_invocs = 0;
fwbw_total_cells = 0;
}
}
f1_total_cellspersec = f1_total_secs == 0? 0 : (double)f1_total_cells / f1_total_secs;
f2_total_cellspersec = f2_total_secs == 0? 0 : (double)f2_total_cells / f2_total_secs;
fwbw_total_cellspersec = fwbw_total_secs == 0? 0 : (double)fwbw_total_cells / fwbw_total_secs;
if (Dflag) {
fprintf(stderr, "%sPer-Thread Stats:\n", my_tab);
fprintf(stderr, "%s%s" "%11s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %25s %25s %25s %9s\n", my_tab, my_tab,
"", "ReadParse", "Scan", "Reg Cnts", "MPRegCnt", "Anch List", "Hit List", "Pass1", "Vect Hits", "Pass2", "Dup Remv",
"Vector SW", "Scalar SW", "Post SW", "Wait");
fprintf(stderr, "%s%s" "%11s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %15s %9s %15s %9s %15s %9s %9s\n", my_tab, my_tab,
"", "Time", "Time", "Time", "Time", "Time", "Time", "Time", "Time", "Time", "Time",
"Invocs", "Time", "Invocs", "Time", "Invocs", "Time", "Time");
fprintf(stderr, "\n");
for(i = 0; i < num_threads; i++) {
fprintf(stderr, "%s%s" "Thread %-4d %9.2f %9.2f %9.2f %9.2f %9.2f %9.2f %9.2f %9.2f %9.2f %9.2f %15s %9.2f %15s %9.2f %15s %9.2f %9.2f\n", my_tab, my_tab,
i, tps[i].readparse_secs, tps[i].scan_secs,
tps[i].region_counts_secs, tps[i].mp_region_counts_secs, tps[i].anchor_list_secs, tps[i].hit_list_secs,
tps[i].pass1_secs, tps[i].get_vector_hits_secs, tps[i].pass2_secs, tps[i].duplicate_removal_secs,
comma_integer(tps[i].f1_invocs), tps[i].f1_secs,
comma_integer(tps[i].f2_invocs), tps[i].f2_secs,
comma_integer(tps[i].fwbw_invocs), tps[i].fwbw_secs,
time_counter_get_secs(&tpgA[i].wait_tc));
}
for (i = 0; i < num_threads; i++) {
fprintf (stderr, "thrd:%d anchor_list_init_size:(%.2f, %.2f) anchors_discarded:(%.2f, %.2f) big_gaps:(%.2f, %.2f)\n",
i, stat_get_mean(&tpgA[i].anchor_list_init_size), stat_get_sample_stddev(&tpgA[i].anchor_list_init_size),
stat_get_mean(&tpgA[i].n_anchors_discarded), stat_get_sample_stddev(&tpgA[i].n_anchors_discarded),
stat_get_mean(&tpgA[i].n_big_gaps_anchor_list), stat_get_sample_stddev(&tpgA[i].n_big_gaps_anchor_list));
}
fprintf(stderr, "\n");
}
fprintf(stderr, "%sSpaced Seed Scan:\n", my_tab);
fprintf(stderr, "%s%s%-24s" "%.2f seconds\n", my_tab, my_tab,
"Run-time:", total_scan_secs);
fprintf(stderr, "\n");
fprintf(stderr, "%sVector Smith-Waterman:\n", my_tab);
fprintf(stderr, "%s%s%-24s" "%.2f seconds\n", my_tab, my_tab,
"Run-time:", f1_total_secs);
fprintf(stderr, "%s%s%-24s" "%s\n", my_tab, my_tab,
"Invocations:", comma_integer(f1_total_invocs));
fprintf(stderr, "%s%s%-24s" "%s\n", my_tab, my_tab,
"Bypassed Calls:", comma_integer(f1_calls_bypassed));
fprintf(stderr, "%s%s%-24s" "%.2f million\n", my_tab, my_tab,
"Cells Computed:", (double)f1_total_cells / 1.0e6);
fprintf(stderr, "%s%s%-24s" "%.2f million\n", my_tab, my_tab,
"Cells per Second:", f1_total_cellspersec / 1.0e6);
fprintf(stderr, "\n");
fprintf(stderr, "%sScalar Smith-Waterman:\n", my_tab);
fprintf(stderr, "%s%s%-24s" "%.2f seconds\n", my_tab, my_tab,
"Run-time:", f2_total_secs);
fprintf(stderr, "%s%s%-24s" "%s\n", my_tab, my_tab,
"Invocations:", comma_integer(f2_total_invocs));
fprintf(stderr, "%s%s%-24s" "%.2f million\n", my_tab, my_tab,
"Cells Computed:", (double)f2_total_cells / 1.0e6);
fprintf(stderr, "%s%s%-24s" "%.2f million\n", my_tab, my_tab,
"Cells per Second:", f2_total_cellspersec / 1.0e6);
fprintf(stderr, "\n");
if (shrimp_mode == MODE_COLOUR_SPACE) {
fprintf(stderr, "%sForward-Backward:\n", my_tab);
fprintf(stderr, "%s%s%-24s" "%.2f seconds\n", my_tab, my_tab,
"Run-time:", fwbw_total_secs);
fprintf(stderr, "%s%s%-24s" "%s\n", my_tab, my_tab,
"Invocations:", comma_integer(fwbw_total_invocs));
fprintf(stderr, "%s%s%-24s" "%.2f million\n", my_tab, my_tab,
"Cells Computed:", (double)fwbw_total_cells / 1.0e6);
fprintf(stderr, "%s%s%-24s" "%.2f million\n", my_tab, my_tab,
"Cells per Second:", fwbw_total_cellspersec / 1.0e6);
fprintf(stderr, "\n");
}
fprintf(stderr, "%sMiscellaneous Totals:\n", my_tab);
fprintf(stderr, "%s%s%-24s" "%.2f seconds\n", my_tab, my_tab,
"Fasta Lib Time:", fasta_load_secs);
fprintf(stderr, "%s%s%-24s" "%.2f seconds\n", my_tab, my_tab,
"Read Load Time:", total_readparse_secs);
fprintf(stderr, "%s%s%-24s" "%.2f seconds\n", my_tab, my_tab,
"Wait Time:", total_wait_secs);
fprintf(stderr, "\n");
fprintf(stderr, "%sGeneral:\n", my_tab);
if (pair_mode == PAIR_NONE)
{
fprintf(stderr, "%s%s%-24s" "%s (%.4f%%)\n", my_tab, my_tab,
"Reads Matched:",
comma_integer(total_reads_matched),
(nreads == 0) ? 0 : ((double)total_reads_matched / (double)nreads) * 100);
fprintf(stderr, "%s%s%-24s" "%s (%.4f%%)\n", my_tab, my_tab,
"... with QV >= 10:",
comma_integer(total_reads_matched_conf),
(nreads == 0) ? 0 : ((double)total_reads_matched_conf / (double)nreads) * 100);
fprintf(stderr, "%s%s%-24s" "%s (%.4f%%)\n", my_tab, my_tab,
"Reads Dropped:",
comma_integer(total_reads_dropped),
(nreads == 0) ? 0 : ((double)total_reads_dropped / (double)nreads) * 100);
fprintf(stderr, "%s%s%-24s" "%s\n", my_tab, my_tab,
"Total Matches:",
comma_integer(total_single_matches));
fprintf(stderr, "%s%s%-24s" "%.2f\n", my_tab, my_tab,
"Avg Hits/Matched Read:",
(total_reads_matched == 0) ? 0 : ((double)total_single_matches / (double)total_reads_matched));
fprintf(stderr, "%s%s%-24s" "%s\n", my_tab, my_tab,
"Duplicate Hits Pruned:",
comma_integer(total_dup_single_matches));
}
else // paired hits
{
fprintf(stderr, "%s%s%-40s" "%s (%.4f%%)\n", my_tab, my_tab,
"Pairs Matched:",
comma_integer(total_pairs_matched),
(nreads == 0) ? 0 : ((double)total_pairs_matched / (double)(nreads/2)) * 100);
fprintf(stderr, "%s%s%-40s" "%s (%.4f%%)\n", my_tab, my_tab,
"... with QV >= 10:",
comma_integer(total_pairs_matched_conf),
(nreads == 0) ? 0 : ((double)total_pairs_matched_conf / (double)(nreads/2)) * 100);
fprintf(stderr, "%s%s%-40s" "%s (%.4f%%)\n", my_tab, my_tab,
"Pairs Dropped:",
comma_integer(total_pairs_dropped),
(nreads == 0) ? 0 : ((double)total_pairs_dropped / (double)(nreads/2)) * 100);
fprintf(stderr, "%s%s%-40s" "%s\n", my_tab, my_tab,
"Total Paired Matches:",
comma_integer(total_paired_matches));
fprintf(stderr, "%s%s%-40s" "%.2f\n", my_tab, my_tab,
"Avg Matches/Pair Matched:",
(total_pairs_matched == 0) ? 0 : ((double)total_paired_matches / (double)total_pairs_matched));
fprintf(stderr, "%s%s%-40s" "%s\n", my_tab, my_tab,
"Duplicate Paired Matches Pruned:",
comma_integer(total_dup_paired_matches));
if (half_paired) {
fprintf(stderr, "\n");
fprintf(stderr, "%s%s%-40s" "%s (%.4f%%)\n", my_tab, my_tab,
"Additional Reads Matched Unpaired:",
comma_integer(total_reads_matched),
(nreads == 0) ? 0 : ((double)total_reads_matched / (double)nreads) * 100);
fprintf(stderr, "%s%s%-40s" "%s (%.4f%%)\n", my_tab, my_tab,
"... with QV >= 10:",
comma_integer(total_reads_matched_conf),
(nreads == 0) ? 0 : ((double)total_reads_matched_conf / (double)nreads) * 100);
fprintf(stderr, "%s%s%-40s" "%s\n", my_tab, my_tab,
"Total Unpaired Matches:",
comma_integer(total_single_matches));
fprintf(stderr, "%s%s%-40s" "%.2f\n", my_tab, my_tab,
"Avg Matches/Unpaired Matched Read:",
(total_reads_matched == 0) ? 0 : ((double)total_single_matches / (double)total_reads_matched));
fprintf(stderr, "%s%s%-40s" "%s\n", my_tab, my_tab,
"Duplicate Unpaired Matches Pruned:",
comma_integer(total_dup_single_matches));
}
}
fprintf(stderr, "\n");
fprintf(stderr, "%sMemory usage:\n", my_tab);
fprintf(stderr, "%s%s%-24s" "%s\n", my_tab, my_tab,
"Genomemap:",
comma_integer(count_get_count(&mem_genomemap)));
if (Xflag) {
print_insert_histogram();
}
free(tps);
free(tpgA);
}
/*
 * Print the command-line usage/help text to stderr and exit(1).
 *
 * progname   - argv[0]; any leading directory path is stripped for display.
 * full_usage - when true (-?/--help), also print the advanced options that
 *              are hidden from the short help.
 *
 * Defaults shown in the text come from the DEF_* constants and from the
 * current option flags, so the output reflects the active mode
 * (colour space vs letter space).
 *
 * Fix: corrected the user-facing typo "seperated" -> "separated".
 */
static void
usage(char * progname, bool full_usage){
  char *slash;
  int sn;

  // The -s default below prints the seed list; make sure seeds are loaded.
  if (n_seeds == 0)
    load_default_seeds(0);

  // Show only the basename of the program.
  slash = strrchr(progname, '/');
  if (slash != NULL)
    progname = slash + 1;

  fprintf(stderr,
    "usage: %s [options/parameters] { <r> | -1 <r1> -2 <r2> } <g1> <g2>...\n", progname);
  fprintf(stderr,
    " <r> Reads filename, paired or unpaired\n");
  fprintf(stderr,
    " <r1> Upstream reads filename\n");
  fprintf(stderr,
    " <r2> Downstream reads filename\n");
  fprintf(stderr,
    " <g1> <g2>... Space separated list of genome filenames\n");
  fprintf(stderr,
    "Parameters:\n");
  fprintf(stderr,
    " -s/--seeds Spaced Seed(s) (default: ");
  for (sn = 0; sn < n_seeds; sn++) {
    if (sn > 0)
      fprintf(stderr,
        " ");
    fprintf(stderr, "%s%s\n", seed_to_string(sn), (sn == n_seeds - 1? ")" : ","));
  }
  fprintf(stderr,
    " -o/--report Maximum Hits per Read (default: %d)\n",
    DEF_NUM_OUTPUTS);
  fprintf(stderr,
    " --max-alignments Max. align. per read (0=all) (default: %d)\n",
    DEF_MAX_ALIGNMENTS);
  fprintf(stderr,
    " -w/--match-window Match Window Length (default: %.02f%%)\n",
    DEF_WINDOW_LEN);
  fprintf(stderr,
    " -n/--cmw-mode Match Mode (default: unpaired:%d paired:%d)\n",
    DEF_MATCH_MODE_UNPAIRED, DEF_MATCH_MODE_PAIRED);
  if (full_usage) {
    fprintf(stderr,
      " -l/--cmw-overlap Match Window Overlap Length (default: %.02f%%)\n",
      DEF_WINDOW_OVERLAP);
    fprintf(stderr,
      " -a/--anchor-width Anchor Width Limiting Full SW (default: %d; disable: -1)\n",
      DEF_ANCHOR_WIDTH);
    fprintf(stderr, "\n");
    fprintf(stderr,
      " -S/--save Save Genome Proj. in File (default: no)\n");
    fprintf(stderr,
      " -L/--load Load Genome Proj. from File (default: no)\n");
    fprintf(stderr,
      " -z/--cutoff Projection List Cut-off Len. (default: %u)\n",
      DEF_LIST_CUTOFF);
  }
  fprintf(stderr, "\n");
  // Score defaults depend on the colour-space/letter-space mode.
  fprintf(stderr,
    " -m/--match SW Match Score (default: %d)\n",
    shrimp_mode == MODE_LETTER_SPACE? DEF_LS_MATCH_SCORE : DEF_CS_MATCH_SCORE);
  fprintf(stderr,
    " -i/--mismatch SW Mismatch Score (default: %d)\n",
    shrimp_mode == MODE_LETTER_SPACE? DEF_LS_MISMATCH_SCORE : DEF_CS_MISMATCH_SCORE);
  fprintf(stderr,
    " -g/--open-r SW Gap Open Score (Reference) (default: %d)\n",
    shrimp_mode == MODE_LETTER_SPACE? DEF_LS_A_GAP_OPEN : DEF_CS_A_GAP_OPEN);
  fprintf(stderr,
    " -q/--open-q SW Gap Open Score (Query) (default: %d)\n",
    shrimp_mode == MODE_LETTER_SPACE? DEF_LS_B_GAP_OPEN : DEF_CS_B_GAP_OPEN);
  fprintf(stderr,
    " -e/--ext-r SW Gap Extend Score(Reference)(default: %d)\n",
    shrimp_mode == MODE_LETTER_SPACE? DEF_LS_A_GAP_EXTEND : DEF_CS_A_GAP_EXTEND);
  fprintf(stderr,
    " -f/--ext-q SW Gap Extend Score (Query) (default: %d)\n",
    shrimp_mode == MODE_LETTER_SPACE? DEF_LS_B_GAP_EXTEND : DEF_CS_B_GAP_EXTEND);
  if (shrimp_mode == MODE_COLOUR_SPACE) {
    fprintf(stderr,
      " -x/--crossover SW Crossover Score (default: %d)\n",
      DEF_CS_XOVER_SCORE);
  }
  fprintf(stderr,
    " -r/--cmw-threshold Window Generation Threshold (default: %.02f%%)\n",
    DEF_WINDOW_GEN_THRESHOLD);
  if (shrimp_mode == MODE_COLOUR_SPACE) {
    fprintf(stderr,
      " -v/--vec-threshold SW Vector Hit Threshold (default: %.02f%%)\n",
      DEF_SW_VECT_THRESHOLD);
  }
  fprintf(stderr,
    " -h/--full-threshold SW Full Hit Threshold (default: %.02f%%)\n",
    DEF_SW_FULL_THRESHOLD);
  fprintf(stderr, "\n");
  fprintf(stderr,
    " -N/--threads Number of Threads (default: %d)\n",
    DEF_NUM_THREADS);
  if (full_usage) {
    fprintf(stderr,
      " -K/--thread-chunk Thread Chunk Size (default: %d)\n",
      DEF_CHUNK_SIZE);
  }
  fprintf(stderr, "\n");
  fprintf(stderr,
    " -p/--pair-mode Paired Mode (default: %s)\n",
    pair_mode_string[pair_mode]);
  fprintf(stderr,
    " -I/--isize Min and Max Insert Size (default: %d,%d)\n",
    DEF_MIN_INSERT_SIZE, DEF_MAX_INSERT_SIZE);
  fprintf(stderr,
    " --longest-read Maximum read length (default: %d)\n",
    DEF_LONGEST_READ_LENGTH);
  fprintf(stderr,
    " -1/--upstream Upstream read pair file\n");
  fprintf(stderr,
    " -2/--downstream Downstream read pair file\n");
  fprintf(stderr,
    " --un Dump unaligned reads to file\n");
  fprintf(stderr,
    " --al Dump aligned reads to file\n");
  fprintf(stderr,
    " --read-group Attach SAM Read Group name\n");
  fprintf(stderr,
    " --sam-header Use file as SAM header\n");
  fprintf(stderr,
    " --single-best-mapping Report only the best mapping(s), this is not strata (see README)\n");
  fprintf(stderr,
    " --all-contigs Report a maximum of 1 mapping for each read.\n");
  fprintf(stderr,
    " --no-mapping-qualities Do not compute mapping qualities\n");
  fprintf(stderr,
    " --insert-size-dist Specifies the mean and stddev of the insert sizes\n");
  fprintf(stderr,
    " --no-improper-mappings (see README)\n");
  if (full_usage) {
    fprintf(stderr,
      " --trim-front Trim front of reads by this amount\n");
    fprintf(stderr,
      " --trim-end Trim end of reads by this amount\n");
    fprintf(stderr,
      " --trim-first Trim only first read in pair\n");
    fprintf(stderr,
      " --trim-second Trim only second read in pair\n");
    fprintf(stderr,
      " --min-avg-qv The minimum average quality value of a read\n");
    fprintf(stderr,
      " --progress Display a progress line each <value> reads. (default %d)\n",progress);
    fprintf(stderr,
      " --save-mmap Save genome projection to shared memory\n");
    fprintf(stderr,
      " --load-mmap Load genome projection from shared memory\n");
    fprintf(stderr,
      " --indel-taboo-len Prevent indels from starting or ending in the tail\n");
    fprintf(stderr,
      " --shrimp-format Output mappings in SHRiMP format (default: %s)\n",Eflag ? "disabled" : "enabled");
    fprintf(stderr,
      " --qv-offset (see README)\n");
    fprintf(stderr,
      " --sam-header-hd (see README)\n");
    fprintf(stderr,
      " --sam-header-sq (see README)\n");
    fprintf(stderr,
      " --sam-header-rg (see README)\n");
    fprintf(stderr,
      " --sam-header-pg (see README)\n");
    fprintf(stderr,
      " --no-autodetect-input (see README)\n");
  }
  fprintf(stderr, "\n");
  fprintf(stderr, "Options:\n");
  fprintf(stderr,
    " -U/--ungapped Perform Ungapped Alignment (default: %s)\n", gapless_sw ? "enabled" : "disabled");
  fprintf(stderr,
    " --global Perform full global alignment (default: %s)\n", Gflag ? "enabled" : "disabled");
  fprintf(stderr,
    " --local Perform local alignment (default: %s)\n", Gflag ? "disabled" : "enabled");
  if (shrimp_mode == MODE_COLOUR_SPACE) {
    fprintf(stderr,
      " --bfast Try to align like bfast (default: %s)\n", Bflag ? "enabled" : "disabled");
  } else {
    fprintf(stderr,
      " --trim-illumina Trim trailing B qual values (default: %s)\n", trim_illumina ? "enabled" : "disabled");
  }
  fprintf(stderr,
    " -C/--negative Negative Strand Aln. Only (default: %s)\n", Cflag ? "enabled" : "disabled");
  fprintf(stderr,
    " -F/--positive Positive Strand Aln. Only (default: %s)\n", Fflag ? "enabled" : "disabled");
  fprintf(stderr,
    " -P/--pretty Pretty Print Alignments (default: %s)\n", Pflag ? "enabled" : "disabled");
  fprintf(stderr,
    " -E/--sam Output SAM Format (default: %s)\n", Eflag ? "enabled" : "disabled");
  fprintf(stderr,
    " -Q/--fastq Reads are in fastq format (default: %s)\n", Qflag ? "enabled" : "disabled");
  if (full_usage) {
    fprintf(stderr,
      " -R/--print-reads Print Reads in Output (default: %s)\n", Rflag ? "enabled" : "disabled");
    // -T is accepted but does nothing (it selects the default behavior);
    // it is deliberately left out of the help text.
    // fprintf(stderr,
    // " -T (does nothing since default) Reverse Tie-break on Negative Strand (default: enabled)\n");
    fprintf(stderr,
      " -t/--tiebreak-off Disable Reverse Tie-break\n");
    fprintf(stderr,
      " on Negative Strand (default: %s)\n",Tflag ? "enabled" : "disabled");
    fprintf(stderr,
      " -X/--isize-hist Print Insert Size Histogram (default: %s)\n", Xflag ? "enabled" : "disabled");
    fprintf(stderr,
      " -Y/--proj-hist Print Genome Proj. Histogram (default: %s)\n", Yflag ? "enabled" : "disabled");
    fprintf(stderr,
      " -Z/--bypass-off Disable Cache Bypass for SW\n");
    fprintf(stderr,
      " Vector Calls (default: %s)\n", hash_filter_calls ? "enabled" : "disabled");
    fprintf(stderr,
      " -H/--spaced-kmers Hash Spaced Kmers in Genome\n");
    fprintf(stderr,
      " Projection (default: %s)\n", Hflag ? "enabled" : "disabled");
    fprintf(stderr,
      " -D/--thread-stats Individual Thread Statistics (default: %s)\n", Dflag ? "enabled" : "disabled");
    fprintf(stderr,
      " -V/--trim-off Disable Automatic Genome\n");
    fprintf(stderr,
      " Index Trimming (default: %s)\n", Vflag ? "enabled" : "disabled");
  }
  fprintf(stderr,
    " --sam-unaligned Unaligned reads in SAM output (default: %s)\n", sam_unaligned ? "enabled" : "disabled");
  fprintf(stderr,
    " --half-paired Output half mapped read pairs (default: %s)\n", half_paired ? "enabled" : "disabled");
  fprintf(stderr,
    " --strata Print only the best scoring hits\n");
  fprintf(stderr,
    " -?/--help Full List of Parameters and Options\n");
  exit(1);
}
/*
 * Dump a pairing_options struct to stderr as a bracketed key:value list.
 * thres_to_buff()/bool_to_buff() format into the caller-supplied scratch
 * buffer and return it, so each call can reuse the same buffer.
 *
 * Fix: the original declared two scratch buffers (char buff[2][100]) but
 * only ever used the first; a single buffer is sufficient here.
 */
static void
print_pairing_options(struct pairing_options * options)
{
  char buff[100];
  fprintf(stderr, "[\n");
  fprintf(stderr, " pairing:%s\n", pair_mode_string[options->pair_mode]);
  fprintf(stderr, " min_insert:%d\n", options->min_insert_size);
  fprintf(stderr, " max_insert:%d\n", options->max_insert_size);
  fprintf(stderr, " pass1_num_outputs:%d\n", options->pass1_num_outputs);
  fprintf(stderr, " pass1_threshold:%s\n", thres_to_buff(buff, &options->pass1_threshold));
  fprintf(stderr, " pass2_num_outputs:%d\n", options->pass2_num_outputs);
  fprintf(stderr, " pass2_threshold:%s\n", thres_to_buff(buff, &options->pass2_threshold));
  fprintf(stderr, " strata:%s\n", bool_to_buff(buff, &options->strata));
  fprintf(stderr, " save_outputs:%s\n", bool_to_buff(buff, &options->save_outputs));
  fprintf(stderr, " stop_count:%d\n", options->stop_count);
  fprintf(stderr, " stop_threshold:%s\n", thres_to_buff(buff, &options->stop_threshold));
  fprintf(stderr, "]\n");
}
/*
 * Dump one read_mapping_options_t to stderr as an indented key:value list.
 *
 * options   - the option set to print.
 * is_paired - selects which fields are meaningful: paired sets print
 *             only_paired/use_mp_region_counts, unpaired sets print the
 *             per-read output, strata and stop fields.
 *
 * The commented-out fprintf lines are remnants of an older, more compact
 * bracketed output format; they are kept as reference for the field layout.
 */
static void
print_read_mapping_options(struct read_mapping_options_t * options, bool is_paired)
{
  // Two scratch buffers: thres_to_buff results are used twice in one
  // fprintf call below (threshold + window_overlap), so one buffer per slot.
  char buff[2][100];
  fprintf(stderr, "[\n");
  fprintf(stderr, " regions:\n");
  //fprintf(stderr, " [\n");
  fprintf(stderr, " recompute:%s\n", bool_to_buff(buff[0], &options->regions.recompute));
  //if (!options->regions.recompute)
  //fprintf(stderr, " ]\n");
  //else
  // fprintf(stderr, ", min_seed:%d, max_seed:%d]\n",
  // options->regions.min_seed, options->regions.max_seed);
  fprintf(stderr, " anchor_list:\n");
  //fprintf(stderr, " [\n");
  fprintf(stderr, " recompute:%s\n", bool_to_buff(buff[0], &options->anchor_list.recompute));
  if (options->anchor_list.recompute) {
    fprintf(stderr, " collapse:%s\n", bool_to_buff(buff[0], &options->anchor_list.collapse));
    fprintf(stderr, " use_region_counts:%s\n", bool_to_buff(buff[0], &options->anchor_list.use_region_counts));
    fprintf(stderr, " use_mp_region_counts:%d\n", options->anchor_list.use_mp_region_counts);
  }
  //fprintf(stderr, " ]\n");
  fprintf(stderr, " hit_list:\n");
  //fprintf(stderr, " [\n");
  fprintf(stderr, " recompute:%s\n", bool_to_buff(buff[0], &options->hit_list.recompute));
  if (options->hit_list.recompute) {
    fprintf(stderr, " gapless:%s\n", bool_to_buff(buff[0], &options->hit_list.gapless));
    fprintf(stderr, " match_mode:%d\n", options->hit_list.match_mode);
    fprintf(stderr, " threshold:%s\n", thres_to_buff(buff[0], &options->hit_list.threshold));
  }
  //fprintf(stderr, " ]\n");
  fprintf(stderr, " pass1:\n");
  //fprintf(stderr, " [\n");
  fprintf(stderr, " recompute:%s\n", bool_to_buff(buff[0], &options->pass1.recompute));
  if (options->pass1.recompute) {
    fprintf(stderr, " threshold:%s\n", thres_to_buff(buff[0], &options->pass1.threshold));
    fprintf(stderr, " window_overlap:%s\n", thres_to_buff(buff[1], &options->pass1.window_overlap));
    fprintf(stderr, " min_matches:%d\n", options->pass1.min_matches);
    fprintf(stderr, " gapless:%s\n", bool_to_buff(buff[0], &options->pass1.gapless));
    if (is_paired) {
      fprintf(stderr, " only_paired:%s\n", bool_to_buff(buff[0], &options->pass1.only_paired));
    }
    if (!is_paired) {
      fprintf(stderr, " num_outputs:%d\n", options->pass1.num_outputs);
    }
  }
  //fprintf(stderr, " ]\n");
  fprintf(stderr, " pass2:\n");
  //fprintf(stderr, " [\n");
  fprintf(stderr, " threshold:%s\n", thres_to_buff(buff[0], &options->pass2.threshold));
  if (!is_paired) {
    fprintf(stderr, " strata:%s\n", bool_to_buff(buff[0], &options->pass2.strata));
    fprintf(stderr, " save_outputs:%s\n", bool_to_buff(buff[0], &options->pass2.save_outputs));
    fprintf(stderr, " num_outputs:%d\n", options->pass2.num_outputs);
  }
  //fprintf(stderr, " ]\n");
  // Stop-early settings only apply to unpaired mapping.
  if (!is_paired) {
    fprintf(stderr, " stop:\n");
    //fprintf(stderr, " [\n");
    fprintf(stderr, " stop_count:%d\n", options->pass2.stop_count);
    if (options->pass2.stop_count > 0) {
      fprintf(stderr, " stop_threshold:%s\n", thres_to_buff(buff[0], &options->pass2.stop_threshold));
    }
    //fprintf(stderr, " ]\n");
  }
  fprintf(stderr, "]\n");
}
/*
 * Print the effective run settings (seeds, global flags, SW scores, and
 * the active mapping-option sets) to stderr.
 *
 * NOTE(review): the function returns early after printing the mapping
 * option sets; everything after the bare `return;` below is unreachable
 * legacy output kept from an older settings format. Confirm whether it
 * can be deleted.
 */
static void
print_settings() {
  char buff[100];
  static char const my_tab[] = " ";
  int sn, i;
  fprintf(stderr, "Settings:\n");
  // Seeds: first seed on the header line, remaining seeds aligned under it.
  fprintf(stderr, "%s%-40s%s (%d/%d)\n", my_tab,
    (n_seeds == 1) ? "Spaced Seed (weight/span)" : "Spaced Seeds (weight/span)",
    seed_to_string(0), seed[0].weight, seed[0].span);
  for (sn = 1; sn < n_seeds; sn++) {
    fprintf(stderr, "%s%-40s%s (%d/%d)\n", my_tab, "",
      seed_to_string(sn), seed[sn].weight, seed[sn].span);
  }
  // Global settings
  fprintf(stderr, "\n");
  fprintf(stderr, "%s%-40s%d\n", my_tab, "Number of threads:", num_threads);
  fprintf(stderr, "%s%-40s%d\n", my_tab, "Thread chunk size:", chunk_size);
  fprintf(stderr, "%s%-40s%s\n", my_tab, "Window length:", thres_to_buff(buff, &window_len));
  fprintf(stderr, "%s%-40s%s\n", my_tab, "Hash filter calls:", hash_filter_calls? "yes" : "no");
  fprintf(stderr, "%s%-40s%d%s\n", my_tab, "Anchor width:", anchor_width,
    anchor_width == -1? " (disabled)" : "");
  fprintf(stderr, "%s%-40s%d%s\n", my_tab, "Indel taboo Len:", indel_taboo_len,
    indel_taboo_len == 0? " (disabled)" : "");
  // Only report a cutoff when it is stricter than the default.
  if (list_cutoff < DEF_LIST_CUTOFF) {
    fprintf(stderr, "%s%-40s%u\n", my_tab, "Index list cutoff length:", list_cutoff);
  }
  fprintf(stderr, "%s%-40s%s\n", my_tab, "Gapless mode:", gapless_sw? "yes" : "no");
  fprintf(stderr, "%s%-40s%s\n", my_tab, "Global alignment:", Gflag? "yes" : "no");
  fprintf(stderr, "%s%-40s%s\n", my_tab, "Region filter:", use_regions? "yes" : "no");
  if (use_regions) {
    fprintf(stderr, "%s%-40s%d\n", my_tab, "Region size:", (1 << region_bits));
    fprintf(stderr, "%s%-40s%d\n", my_tab, "Region overlap:", region_overlap);
  }
  // Quality-value related settings only apply to fastq input (-Q).
  if (Qflag) {
    fprintf(stderr, "%s%-40s%s\n", my_tab, "Ignore QVs:", ignore_qvs? "yes" : "no");
  }
  if (Qflag && !ignore_qvs) {
    fprintf(stderr, "%s%-40s%d%s\n", my_tab, "Minimum average qv:", min_avg_qv, min_avg_qv < 0? " (none)" : "");
    //fprintf(stderr, "%s%-40sPHRED+%d\n", my_tab, "QV input encoding:", qual_delta);
  }
  fprintf(stderr, "%s%-40s%s\n", my_tab, "Compute mapping qualities:", compute_mapping_qualities? "yes" : "no");
  if (compute_mapping_qualities)
  {
    fprintf(stderr, "%s%-40s%s\n", my_tab, "All contigs:", all_contigs? "yes" : "no");
    fprintf(stderr, "%s%-40s%s\n", my_tab, "Single best mapping:", single_best_mapping? "yes" : "no");
  }
  //fprintf(stderr, "%s%-40s%s\n", my_tab, "Hack:", hack? "yes" : "no");
  // Scores: raw SW score alongside the probability it was derived from.
  fprintf(stderr, "\n");
  fprintf(stderr, "%s%-40s%-10d\n", my_tab, "SW Match Score:", match_score);
  fprintf(stderr, "%s%-40s%-10d\t[%.1e]\n", my_tab, "SW Mismatch Score [Prob]:", mismatch_score, pr_mismatch);
  fprintf(stderr, "%s%-40s%-10d\t[%.1e]\n", my_tab, "SW Del Open Score [Prob]:", a_gap_open_score, pr_del_open);
  fprintf(stderr, "%s%-40s%-10d\t[%.1e]\n", my_tab, "SW Ins Open Score [Prob]:", b_gap_open_score, pr_ins_open);
  fprintf(stderr, "%s%-40s%-10d\t[%.1e]\n", my_tab, "SW Del Extend Score [Prob]:", a_gap_extend_score, pr_del_extend);
  fprintf(stderr, "%s%-40s%-10d\t[%.1e]\n", my_tab, "SW Ins Extend Score [Prob]:", b_gap_extend_score, pr_ins_extend);
  if (shrimp_mode == MODE_COLOUR_SPACE) {
    fprintf(stderr, "%s%-40s%-10d\t[%.1e]\n", my_tab, "SW Crossover Score [Prob]:", crossover_score, pr_xover);
  }
  fprintf(stderr, "\n");
  // Mapping option sets: paired sets (plus any half-paired fallbacks),
  // or plain unpaired sets.
  if (n_paired_mapping_options > 0) { // paired mapping
    for (i = 0; i < n_paired_mapping_options; i++) {
      fprintf(stderr, "Paired mapping options, set [%d]\n", i);
      print_pairing_options(&paired_mapping_options[i].pairing);
      print_read_mapping_options(&paired_mapping_options[i].read[0], true);
      print_read_mapping_options(&paired_mapping_options[i].read[1], true);
    }
    if (n_unpaired_mapping_options[0] > 0) {
      fprintf(stderr, "\n");
      for (i = 0; i < n_unpaired_mapping_options[0]; i++) {
        fprintf(stderr, "Unpaired mapping options for first read in a pair, set [%d]\n", i);
        print_read_mapping_options(&unpaired_mapping_options[0][i], false);
      }
    }
    if (n_unpaired_mapping_options[1] > 0) {
      fprintf(stderr, "\n");
      for (i = 0; i < n_unpaired_mapping_options[1]; i++) {
        fprintf(stderr, "Unpaired mapping options for second read in a pair, set [%d]\n", i);
        print_read_mapping_options(&unpaired_mapping_options[1][i], false);
      }
    }
  } else {
    for (i = 0; i < n_unpaired_mapping_options[0]; i++) {
      fprintf(stderr, "Unpaired mapping options, set [%d]\n", i);
      print_read_mapping_options(&unpaired_mapping_options[0][i], false);
    }
  }
  fprintf(stderr, "\n");
  return;
  // ---- UNREACHABLE from here down: legacy settings output. ----
  fprintf(stderr, "%s%-40s%d\n", my_tab, "Number of Outputs per Read:", num_outputs);
  fprintf(stderr, "%s%-40s%d\n", my_tab, "Window Generation Mode:", match_mode);
  // IS_ABSOLUTE thresholds are stored negated; print as a count, otherwise
  // as a percentage.
  if (IS_ABSOLUTE(window_overlap)) {
    fprintf(stderr, "%s%-40s%u\n", my_tab, "Window Overlap Length:", (uint)-window_overlap);
  } else {
    fprintf(stderr, "%s%-40s%.02f%%\n", my_tab, "Window Overlap Length:", window_overlap);
  }
  fprintf(stderr, "\n");
  if (IS_ABSOLUTE(window_gen_threshold)) {
    fprintf(stderr, "%s%-40s%u\n", my_tab,
      "Window Generation Threshold:", (uint)-window_gen_threshold);
  } else {
    fprintf(stderr, "%s%-40s%.02f%%\n", my_tab,
      "Window Generation Threshold:", window_gen_threshold);
  }
  if (shrimp_mode == MODE_COLOUR_SPACE) {
    if (IS_ABSOLUTE(sw_vect_threshold)) {
      fprintf(stderr, "%s%-40s%u\n", my_tab, "SW Vector Hit Threshold:", (uint)-sw_vect_threshold);
    } else {
      fprintf(stderr, "%s%-40s%.02f%%\n", my_tab, "SW Vector Hit Threshold:", sw_vect_threshold);
    }
  }
  if (IS_ABSOLUTE(sw_full_threshold)) {
    fprintf(stderr, "%s%-40s%u\n", my_tab,
      shrimp_mode == MODE_COLOUR_SPACE? "SW Full Hit Threshold:" : "SW Hit Threshold",
      (uint)-sw_full_threshold);
  } else {
    fprintf(stderr, "%s%-40s%.02f%%\n", my_tab,
      shrimp_mode == MODE_COLOUR_SPACE? "SW Full Hit Threshold:" : "SW Hit Threshold",
      sw_full_threshold);
  }
  fprintf(stderr, "\n");
  fprintf(stderr, "%s%-40s%s\n", my_tab, "Paired mode:", pair_mode_string[pair_mode]);
  if (pair_mode != PAIR_NONE) {
    fprintf(stderr, "%s%-40smin:%d max:%d\n", my_tab, "Insert sizes:", min_insert_size, max_insert_size);
    if (Xflag) {
      fprintf(stderr, "%s%-40s%d\n", my_tab, "Bucket size:", insert_histogram_bucket_size);
    }
  }
  fprintf(stderr, "\n");
}
/*
 * Apply a named configuration preset. Currently only "mirna" is known.
 * Returns 1 when the preset was recognized and applied, 0 otherwise.
 */
static int
set_mode_from_string(char const * s) {
  // Unknown preset name: leave all settings untouched.
  if (strcmp(s, "mirna") != 0)
    return 0;

  // microRNA preset: tiny reads, so use gapless alignment with extremely
  // expensive gap opens, hashed spaced kmers, and a full-read window.
  mode_mirna = true;
  //load_default_mirna_seeds();
  Hflag = true;
  gapless_sw = true;
  anchor_width = 0;
  a_gap_open_score = -255;
  b_gap_open_score = -255;
  hash_filter_calls = false;
  match_mode = 1;
  window_len = 100.0;
  Gflag = false;
  compute_mapping_qualities = false;
  return 1;
}
/*
 * Translate a pair-mode name from the command line into its PAIR_*
 * constant. Exits with a message on NULL or unrecognized input.
 */
static void
get_pair_mode(char * c, int * pair_mode)
{
  static const struct {
    char const * name;
    int mode;
  } modes[] = {
    { "none", PAIR_NONE },
    { "opp-in", PAIR_OPP_IN },
    { "opp-out", PAIR_OPP_OUT },
    { "col-fw", PAIR_COL_FW },
    { "col-bw", PAIR_COL_BW },
  };
  size_t k;

  if (c == NULL) {
    fprintf(stderr, "error: invalid pair mode\n");
    exit(1);
  }
  for (k = 0; k < sizeof modes / sizeof modes[0]; k++) {
    if (strcmp(c, modes[k].name) == 0) {
      *pair_mode = modes[k].mode;
      return;
    }
  }
  fprintf(stderr, "error: unrecognized pair mode (%s)\n", c);
  exit(1);
}
/*
 * Parse a decimal integer option field into *d. Exits with a message on
 * NULL, non-numeric, or out-of-range input.
 *
 * Fix: uses strtol instead of atoi (CERT ERR34-C) — atoi silently returns
 * 0 for garbage and has undefined behavior on overflow, so a mistyped
 * option value would previously be accepted as 0.
 */
static void
get_int(char * c, int * d)
{
  char * end;
  long v;

  if (c == NULL) {
    fprintf(stderr, "error: invalid integer\n");
    exit(1);
  }
  errno = 0;
  v = strtol(c, &end, 10);
  // Reject: nothing parsed, trailing junk, or value outside int range.
  if (end == c || *end != '\0' || errno == ERANGE || v < INT_MIN || v > INT_MAX) {
    fprintf(stderr, "error: invalid integer [%s]\n", c);
    exit(1);
  }
  *d = (int)v;
}
/*
 * Parse a score threshold option field into *t.
 *
 * Convention: a value written with '%' or '.' is a percentage and stays
 * positive; a bare integer is stored negated, which the rest of the code
 * (IS_ABSOLUTE) interprets as an absolute score.
 *
 * Fix: uses strtod instead of atof so completely non-numeric input is
 * rejected — atof silently returns 0.0, which then slipped through as an
 * "absolute threshold of 0". Trailing characters (e.g. the '%' in "90%")
 * are still accepted, matching the original behavior.
 */
static void
get_threshold(char * c, double * t)
{
  char * end;

  if (c == NULL) {
    fprintf(stderr, "error: invalid threshold\n");
    exit(1);
  }
  *t = strtod(c, &end);
  if (end == c) {
    // No digits consumed at all: the field is not a number.
    fprintf(stderr, "error: invalid threshold [%s]\n", c);
    exit(1);
  }
  if (*t < 0.0) {
    fprintf(stderr, "error: invalid threshold [%s]\n", c);
    exit(1);
  }
  if (strcspn(c, "%.") == strlen(c))
    *t = -(*t); // no '%' or '.': treat as absolute score, stored negated
}
/*
 * Parse a boolean option field ("true"/"1" or "false"/"0") into *b.
 * Exits with a message on invalid input.
 *
 * Fix: guard against NULL before calling strcmp (undefined behavior).
 * The sibling parsers (get_int, get_threshold, get_pair_mode) all check
 * NULL, and callers feed this function strtok() results, which are NULL
 * whenever a field is missing from the option string.
 */
static void
get_bool(char * c, bool * b)
{
  if (c == NULL) {
    fprintf(stderr, "error: invalid bool\n");
    exit(1);
  }
  if (!strcmp(c, "true") || !strcmp(c, "1")) {
    *b = true;
  } else if (!strcmp(c, "false") || !strcmp(c, "0")) {
    *b = false;
  } else {
    fprintf(stderr, "error: invalid bool\n");
    exit(1);
  }
}
/*
 * Parse a comma-separated pairing-options string, in field order:
 *   pair_mode,min_insert,max_insert,pass1_num_outputs,pass1_threshold,
 *   pass2_num_outputs,pass2_threshold,stop_count,stop_threshold,
 *   strata,save_outputs
 * Tokenization mutates the input string (strtok). Each field parser
 * prints a message and exits on bad or missing (NULL) input.
 */
static void
get_pairing_options(char * c, struct pairing_options * options)
{
  if (c == NULL) {
    fprintf(stderr, "error: invalid pairing options\n");
    exit(1);
  }
  get_pair_mode(strtok(c, ","), &options->pair_mode);
  get_int(strtok(NULL, ","), &options->min_insert_size);
  get_int(strtok(NULL, ","), &options->max_insert_size);
  get_int(strtok(NULL, ","), &options->pass1_num_outputs);
  get_threshold(strtok(NULL, ","), &options->pass1_threshold);
  get_int(strtok(NULL, ","), &options->pass2_num_outputs);
  get_threshold(strtok(NULL, ","), &options->pass2_threshold);
  get_int(strtok(NULL, ","), &options->stop_count);
  get_threshold(strtok(NULL, ","), &options->stop_threshold);
  get_bool(strtok(NULL, ","), &options->strata);
  get_bool(strtok(NULL, ","), &options->save_outputs);
}
/*
 * Parse a '/'-separated read-mapping-options string:
 *   regions/anchor_list/hit_list/pass1/pass2[/stop]
 * where each section is itself comma-separated. The stop section exists
 * only for unpaired mapping. Tokenization mutates the input string.
 *
 * Fix: each section returned by strtok_r is now checked for NULL before
 * use. Previously a missing section passed NULL to logit's %s (undefined
 * behavior) and then to strtok(q, ",") — and strtok with a NULL first
 * argument does not fail, it silently continues tokenizing the PREVIOUS
 * section, mis-parsing the options without any diagnostic.
 */
static void
get_read_mapping_options(char * c, struct read_mapping_options_t * options, bool is_paired)
{
  char * p, * q;
  char * save_ptr;

  if (c == NULL) {
    fprintf(stderr, "error: invalid read mapping options\n");
    exit(1);
  }
  fprintf(stderr, "parsing read_mapping_options [%s] (%s)\n", c, is_paired? "paired" : "unpaired");

  // regions
  q = strtok_r(c, "/", &save_ptr);
  if (q == NULL) {
    fprintf(stderr, "error: invalid read mapping options: missing regions section\n");
    exit(1);
  }
  logit(0, "parsing region options: %s", q);
  p = strtok(q, ",");
  get_bool(p, &options->regions.recompute);
  // (min_seed/max_seed fields of the regions section are currently unused)

  // anchor_list
  q = strtok_r(NULL, "/", &save_ptr);
  if (q == NULL) {
    fprintf(stderr, "error: invalid read mapping options: missing anchor_list section\n");
    exit(1);
  }
  logit(0, "parsing anchor_list options: %s", q);
  p = strtok(q, ",");
  get_bool(p, &options->anchor_list.recompute);
  if (options->anchor_list.recompute) {
    p = strtok(NULL, ",");
    get_bool(p, &options->anchor_list.collapse);
    p = strtok(NULL, ",");
    get_bool(p, &options->anchor_list.use_region_counts);
    if (is_paired) {
      p = strtok(NULL, ",");
      get_int(p, &options->anchor_list.use_mp_region_counts);
    }
  }

  // hit_list
  q = strtok_r(NULL, "/", &save_ptr);
  if (q == NULL) {
    fprintf(stderr, "error: invalid read mapping options: missing hit_list section\n");
    exit(1);
  }
  logit(0, "parsing hit_list options: %s", q);
  p = strtok(q, ",");
  get_bool(p, &options->hit_list.recompute);
  if (options->hit_list.recompute) {
    p = strtok(NULL, ",");
    get_bool(p, &options->hit_list.gapless);
    p = strtok(NULL, ",");
    get_int(p, &options->hit_list.match_mode);
    p = strtok(NULL, ",");
    get_threshold(p, &options->hit_list.threshold);
  }

  // pass1
  q = strtok_r(NULL, "/", &save_ptr);
  if (q == NULL) {
    fprintf(stderr, "error: invalid read mapping options: missing pass1 section\n");
    exit(1);
  }
  logit(0, "parsing pass1 options: %s", q);
  p = strtok(q, ",");
  get_bool(p, &options->pass1.recompute);
  if (options->pass1.recompute) {
    p = strtok(NULL, ",");
    get_threshold(p, &options->pass1.threshold);
    p = strtok(NULL, ",");
    get_threshold(p, &options->pass1.window_overlap);
    p = strtok(NULL, ",");
    get_int(p, &options->pass1.min_matches);
    p = strtok(NULL, ",");
    get_bool(p, &options->pass1.gapless);
    if (is_paired) {
      p = strtok(NULL, ",");
      get_bool(p, &options->pass1.only_paired);
    }
    if (!is_paired) {
      p = strtok(NULL, ",");
      get_int(p, &options->pass1.num_outputs);
    }
  }

  // pass2
  q = strtok_r(NULL, "/", &save_ptr);
  if (q == NULL) {
    fprintf(stderr, "error: invalid read mapping options: missing pass2 section\n");
    exit(1);
  }
  logit(0, "parsing pass2 options: %s", q);
  p = strtok(q, ",");
  get_threshold(p, &options->pass2.threshold);
  if (!is_paired) {
    p = strtok(NULL, ",");
    get_bool(p, &options->pass2.strata);
    p = strtok(NULL, ",");
    get_bool(p, &options->pass2.save_outputs);
    p = strtok(NULL, ",");
    get_int(p, &options->pass2.num_outputs);
  }

  // stop (unpaired mapping only)
  if (!is_paired) {
    q = strtok_r(NULL, "/", &save_ptr);
    if (q == NULL) {
      fprintf(stderr, "error: invalid read mapping options: missing stop section\n");
      exit(1);
    }
    logit(0, "parsing stop options: %s", q);
    p = strtok(q, ",");
    get_int(p, &options->pass2.stop_count);
    if (options->pass2.stop_count > 0) {
      p = strtok(NULL, ",");
      get_threshold(p, &options->pass2.stop_threshold);
    }
  }
}
int main(int argc, char **argv){
char **genome_files = NULL;
int ngenome_files = 0;
char *progname = argv[0];
char const * optstr = NULL;
char *c, *save_c;
int ch;
int i, sn, cn;
llint before;
bool a_gap_open_set, b_gap_open_set;
bool a_gap_extend_set, b_gap_extend_set;
bool match_score_set, mismatch_score_set, xover_score_set;
bool match_mode_set = false;
bool qual_delta_set = false;
fasta_t fasta = NULL, left_fasta = NULL, right_fasta = NULL;
my_alloc_init(64l*1024l*1024l*1024l, 64l*1024l*1024l*1024l);
shrimp_args.argc=argc;
shrimp_args.argv=argv;
set_mode_from_argv(argv, &shrimp_mode);
a_gap_open_set = b_gap_open_set = a_gap_extend_set = b_gap_extend_set = false;
match_score_set = mismatch_score_set = xover_score_set = false;
if (shrimp_mode == MODE_COLOUR_SPACE) {
match_score = DEF_CS_MATCH_SCORE;
mismatch_score = DEF_CS_MISMATCH_SCORE;
a_gap_open_score = DEF_CS_A_GAP_OPEN;
b_gap_open_score = DEF_CS_B_GAP_OPEN;
a_gap_extend_score = DEF_CS_A_GAP_EXTEND;
b_gap_extend_score = DEF_CS_B_GAP_EXTEND;
}
fasta_reset_stats();
fprintf(stderr, "--------------------------------------------------"
"------------------------------\n");
fprintf(stderr, "gmapper: %s.\nSHRiMP %s\n[%s; CXXFLAGS=\"%s\"]\n",
get_mode_string(shrimp_mode),
SHRIMP_VERSION_STRING,
get_compiler(), CXXFLAGS);
fprintf(stderr, "--------------------------------------------------"
"------------------------------\n");
struct option getopt_long_options[standard_entries+MAX(colour_entries,letter_entries)];
memcpy(getopt_long_options,standard_options,sizeof(standard_options));
//TODO -t -9 -d -Z -D -Y
switch(shrimp_mode){
case MODE_COLOUR_SPACE:
optstr = "?1:2:s:n:w:l:o:p:m:i:g:q:e:f:h:r:a:z:DCEFHI:K:L:M:N:PRS:TtUVXYZQx:v:";
memcpy(getopt_long_options+standard_entries,colour_space_options,sizeof(colour_space_options));
break;
case MODE_LETTER_SPACE:
optstr = "?1:2:s:n:w:l:o:p:m:i:g:q:e:f:h:r:a:z:DCEFHI:K:L:M:N:PRS:TtUVXYZQ";
memcpy(getopt_long_options+standard_entries,letter_space_options,sizeof(letter_space_options));
break;
case MODE_HELICOS_SPACE:
fprintf(stderr,"Helicose currently unsupported\n");
exit(1);
break;
}
//get a copy of the command line
size_t size_of_command_line=0;
for (i=0; i<argc; i++) {
size_of_command_line+=strlen(argv[i])+1;
}
size_of_command_line++;
char command_line[size_of_command_line];
size_t offset=0;
for (i=0; i<argc; i++) {
offset+=sprintf(command_line+offset,"%s",argv[i]);
if (i+1!=argc) {
offset+=sprintf(command_line+offset," ");
}
}
assert(offset+1<=size_of_command_line);
while ((ch = getopt_long(argc,argv,optstr,getopt_long_options,NULL)) != -1){
switch (ch) {
case 9:
strata_flag = true;
break;
case 10:
unaligned_reads_file=fopen(optarg,"w");
if (unaligned_reads_file==NULL) {
fprintf(stderr,"error: cannot open file \"%s\" for writting\n",optarg);
}
break;
case 11:
aligned_reads_file=fopen(optarg,"w");
if (aligned_reads_file==NULL) {
fprintf(stderr,"error: cannot open file \"%s\" for writting\n",optarg);
}
break;
case 12:
sam_unaligned=true;
break;
case 13:
longest_read_len=atoi(optarg);
if (longest_read_len<200) {
fprintf(stderr,"error: longest read length must be at least 200\n");
exit(1);
}
break;
case 14:
max_alignments=atoi(optarg);
break;
case 15:
logit(0, "as of v2.2.0, --global is on by default");
Gflag = true;
break;
case 16:
Bflag = true;
Gflag = true;
break;
case 17:
sam_read_group_name=strdup(optarg);
sam_sample_name = strchr(sam_read_group_name,',');
if (sam_sample_name==NULL) {
fprintf(stderr,"error: sam read group needs to be two values, delimited by commas.\n");
fprintf(stderr," the first value is unique read group identifier\n");
fprintf(stderr," the second is the sample (use pool name where a pool is being sequence\n");
exit(1);
}
sam_sample_name[0]='\0';
sam_sample_name++;
break;
case 18:
sam_header_filename=optarg;
break;
case 19:
half_paired = false;
break;
case 20:
sam_r2=true;
break;
case '1':
left_reads_filename = optarg;
break;
case '2':
right_reads_filename = optarg;
break;
case 's':
if (strchr(optarg, ',') == NULL) { // allow comma-separated seeds
if (optarg[0] == 'w') {
int weight = (int)atoi(&optarg[1]);
if (!load_default_seeds(weight)) {
fprintf(stderr, "error: invalid spaced seed weight (%d)\n", weight);
exit(1);
}
} else {
if (!add_spaced_seed(optarg)) {
fprintf(stderr, "error: invalid spaced seed \"%s\"\n", optarg);
exit (1);
}
}
} else {
c = strtok(optarg, ",");
do {
if (c[0] == 'w') {
int weight = (int)atoi(&c[1]);
if (!load_default_seeds(weight)) {
fprintf(stderr, "error: invalid spaced seed weight (%d)\n", weight);
exit(1);
}
} else {
if (!add_spaced_seed(c)) {
fprintf(stderr, "error: invalid spaced seed \"%s\"\n", c);
exit (1);
}
}
c = strtok(NULL, ",");
} while (c != NULL);
}
break;
case 'n':
match_mode = atoi(optarg);
match_mode_set = true;
break;
case 'w':
window_len = atof(optarg);
if (window_len <= 0.0) {
fprintf(stderr, "error: invalid window "
"length\n");
exit(1);
}
if (strcspn(optarg, "%.") == strlen(optarg))
window_len = -window_len; //absol.
break;
case 'o':
num_outputs = atoi(optarg);
num_tmp_outputs = 20 + num_outputs;
break;
case 'm':
match_score = atoi(optarg);
match_score_set = true;
break;
case 'i':
mismatch_score = atoi(optarg);
mismatch_score_set = true;
break;
case 'g':
a_gap_open_score = atoi(optarg);
a_gap_open_set = true;
break;
case 'q':
b_gap_open_score = atoi(optarg);
b_gap_open_set = true;
break;
case 'e':
a_gap_extend_score = atoi(optarg);
a_gap_extend_set = true;
break;
case 'f':
b_gap_extend_score = atoi(optarg);
b_gap_extend_set = true;
break;
case 'x':
assert(shrimp_mode == MODE_COLOUR_SPACE);
crossover_score = atoi(optarg);
xover_score_set = true;
break;
case 'h':
sw_full_threshold = atof(optarg);
if (sw_full_threshold < 0.0) {
fprintf(stderr, "error: invalid s-w full "
"hit threshold\n");
exit(1);
}
if (strcspn(optarg, "%.") == strlen(optarg))
sw_full_threshold = -sw_full_threshold; //absol.
break;
case 'v':
assert(shrimp_mode == MODE_COLOUR_SPACE);
sw_vect_threshold = atof(optarg);
if (sw_vect_threshold < 0.0) {
fprintf(stderr, "error: invalid s-w vector "
"hit threshold\n");
exit(1);
}
if (strcspn(optarg, "%.") == strlen(optarg))
sw_vect_threshold = -sw_vect_threshold; //absol.
break;
case 'r':
window_gen_threshold = atof(optarg);
if (window_gen_threshold < 0.0) {
fprintf(stderr, "error: invalid window generation threshold [%s]\n", optarg);
exit(1);
}
if (strcspn(optarg, "%.") == strlen(optarg))
window_gen_threshold = -window_gen_threshold; //absol.
break;
case 'C':
if (Fflag) {
fprintf(stderr, "error: -C and -F are mutually "
"exclusive\n");
exit(1);
}
Cflag = true;
break;
case 'F':
if (Cflag) {
fprintf(stderr, "error: -C and -F are mutually "
"exclusive\n");
exit(1);
}
Fflag = true;
break;
case 'H':
Hflag = true;
break;
case 'P':
Pflag = true;
Eflag = false;
break;
case 'R':
Rflag = true;
break;
case 't':
Tflag= false;
break;
case 'T':
Tflag = true;
break;
/*
* New options/parameters since SHRiMP 1.2.1
*/
case 'a':
anchor_width = atoi(optarg);
if (anchor_width < -1 || anchor_width >= 100) {
fprintf(stderr, "error: anchor_width requested is invalid (%s)\n",
optarg);
exit(1);
}
break;
case 'X':
Xflag = true;
break;
case 'Y':
Yflag = true;
break;
case 'l':
window_overlap = atof(optarg);
if (window_overlap <= 0.0) {
fprintf(stderr, "error: invalid window overlap\n");
exit(1);
}
if (strcspn(optarg, "%.") == strlen(optarg))
window_overlap = -window_overlap; //absol.
break;
case 'N':
num_threads = atoi(optarg);
break;
case 'K':
chunk_size = atoi(optarg);
break;
case 'S':
save_file = optarg;
break;
case 'L':
load_file = optarg;
break;
case 'D':
Dflag = true;
break;
case '?':
usage(progname, true);
break;
case 'Z':
hash_filter_calls = false;
break;
case 'U':
gapless_sw = true;
anchor_width = 0;
a_gap_open_score = -255;
b_gap_open_score = -255;
hash_filter_calls = false;
break;
case 'p':
if (!strcmp(optarg, "none")) {
pair_mode = PAIR_NONE;
} else if (!strcmp(optarg, "opp-in")) {
pair_mode = PAIR_OPP_IN;
} else if (!strcmp(optarg, "opp-out")) {
pair_mode = PAIR_OPP_OUT;
} else if (!strcmp(optarg, "col-fw")) {
pair_mode = PAIR_COL_FW;
} else if (!strcmp(optarg, "col-bw")) {
pair_mode = PAIR_COL_BW;
} else {
fprintf(stderr, "error: unrecognized pair mode (%s)\n", optarg);
exit(1);
}
break;
case 'I':
c = strtok(optarg, ",");
if (c == NULL)
crash(1, 0, "format for insert sizes is \"-I 200,1000\"\n");
min_insert_size = atoi(c);
if (min_insert_size < 0) {
logit(0, "insert sizes must be nonnegative; check README. resetting min_insert_size from [%s] to 0", optarg);
min_insert_size = 0;
}
c = strtok(NULL, ",");
if (c == NULL)
crash(1, 0, "format for insert sizes is \"-I 200,1000\"\n");
max_insert_size = atoi(c);
if (max_insert_size < 0) {
logit(0, "insert sizes must be nonnegative; check README. resetting max_insert_size from [%s] to 0", optarg);
max_insert_size = 0;
}
if (min_insert_size > max_insert_size)
crash(1, 0, "invalid insert sizes (min:%d,max:%d)\n", min_insert_size, max_insert_size);
break;
case 'E': // on by default; accept option for bw compatibility
logit(0, "as of v2.2.0, -E/--sam is on by default");
Eflag = true;
break;
case 'z':
list_cutoff = atoi(optarg);
if (list_cutoff == 0) {
fprintf(stderr, "error: invalid list cutoff (%s)\n", optarg);
exit(1);
}
break;
case 'V':
Vflag = false;
break;
case 'Q':
Qflag = true;
break;
case 'M':
c = strtok(optarg, ",");
do {
if (!set_mode_from_string(c)) {
fprintf(stderr, "error: unrecognized mode (%s)\n", c);
exit(1);
}
match_mode_set = true;
c = strtok(NULL, ",");
} while (c != NULL);
break;
case 21:
trim=true;
trim_front=atoi(optarg);
if (shrimp_mode == MODE_COLOUR_SPACE) {
fprintf(stderr,"--trim-front cannot be used in colour space mode!\n");
exit(1);
}
if (trim_front<0) {
fprintf(stderr,"--trim-front value must be positive\n");
exit(1);
}
break;
case 22:
trim=true;
trim_end=atoi(optarg);
if (trim_end<0) {
fprintf(stderr,"--trim-end value must be positive\n");
exit(1);
}
break;
case 23:
trim=true;
trim_first=true;
trim_second=false;
break;
case 24:
trim=true;
trim_second=true;
trim_first=false;
break;
case 25:
c = strtok(optarg, ",");
if (c == NULL)
crash(1, 0, "argmuent for insert-size-dist should be \"mean,stddev\" [%s]", optarg);
insert_size_mean = atof(c);
c = strtok(NULL, ",");
if (c == NULL)
crash(1, 0, "argmuent for insert-size-dist should be \"mean,stddev\" [%s]", optarg);
insert_size_stddev = atof(c);
break;
case 26:
use_regions = !use_regions;
break;
case 27:
region_overlap = atoi(optarg);
if (region_overlap < 0) {
fprintf(stderr, "region overlap must be non-negative!\n");
exit(1);
}
break;
case 28:
if (n_unpaired_mapping_options[0] > 0 || n_unpaired_mapping_options[1] > 0) {
fprintf(stderr, "warning: unpaired mapping options set before paired mapping options! the latter take precedence.\n");
half_paired = true;
}
n_paired_mapping_options++;
paired_mapping_options = (struct readpair_mapping_options_t *)
//xrealloc(paired_mapping_options, n_paired_mapping_options * sizeof(paired_mapping_options[0]));
my_realloc(paired_mapping_options, n_paired_mapping_options * sizeof(paired_mapping_options[0]), (n_paired_mapping_options - 1) * sizeof(paired_mapping_options[0]),
&mem_small, "paired_mapping_options");
c = strtok_r(optarg, ";", &save_c);
get_pairing_options(c, &paired_mapping_options[n_paired_mapping_options - 1].pairing);
c = strtok_r(NULL, ";", &save_c);
get_read_mapping_options(c, &paired_mapping_options[n_paired_mapping_options - 1].read[0], true);
c = strtok_r(NULL, ";", &save_c);
get_read_mapping_options(c, &paired_mapping_options[n_paired_mapping_options - 1].read[1], true);
// HACK SETTINGS
pair_mode = paired_mapping_options[0].pairing.pair_mode;
break;
case 29:
int nip;
c = strtok(optarg, ";");
if (c == NULL || (*c != '0' && *c != '1')) {
fprintf(stderr, "error: invalid unpaired mapping options:[%s]\n", optarg);
exit(1);
}
if (n_paired_mapping_options > 0)
half_paired = true;
nip = (*c == '0'? 0 : 1);
n_unpaired_mapping_options[nip]++;
unpaired_mapping_options[nip] = (struct read_mapping_options_t *)
//xrealloc(unpaired_mapping_options[nip], n_unpaired_mapping_options[nip] * sizeof(unpaired_mapping_options[nip][0]));
my_realloc(unpaired_mapping_options[nip], n_unpaired_mapping_options[nip] * sizeof(unpaired_mapping_options[nip][0]), (n_unpaired_mapping_options[nip] - 1) * sizeof(unpaired_mapping_options[nip][0]),
&mem_small, "unpaired_mapping_options[%d]", nip);
c = strtok(NULL, ";");
get_read_mapping_options(c, &unpaired_mapping_options[nip][n_unpaired_mapping_options[nip] - 1], false);
break;
case 30:
min_avg_qv = atoi(optarg);
if (min_avg_qv < -2 || min_avg_qv > 40) {
fprintf(stderr, "error: invalid minimum average quality value (%s)\n", optarg);
}
break;
case 31:
extra_sam_fields = true;
break;
case 32:
region_bits = atoi(optarg);
if (region_bits < 8 || region_bits > 20) {
crash(1, 0, "invalid number of region bits: %s; must be between 8 and 20", optarg);
}
n_regions = (1 << (32 - region_bits));
break;
case 33:
progress = atoi(optarg);
break;
case 34:
save_mmap = optarg;
break;
case 35:
load_mmap = optarg;
break;
case 36:
indel_taboo_len = atoi(optarg);
if (indel_taboo_len < 0)
crash(1, 0, "invalid indel taboo len: [%s]", optarg);
break;
case 37:
single_best_mapping = true;
break;
case 38:
all_contigs = true;
break;
case 39:
compute_mapping_qualities = false;
break;
case 40:
Eflag = false;
break;
case 41: // half-paired: accept option for bw compatibility
logit(0, "as of v2.2.0, --half-paired is on by default");
half_paired = true;
break;
case 42: // no-improper-mappings
improper_mappings = false;
break;
case 43: // qual value offset
qual_delta = atoi(optarg);
qual_delta_set = true;
break;
case 44: // sam-header-hd
sam_header_hd = fopen(optarg, "r");
if (sam_header_hd == NULL)
crash(1, 1, "cannot open sam header file with HD lines [%s]", optarg);
break;
case 45: // sam-header-sq
sam_header_sq = fopen(optarg, "r");
if (sam_header_sq == NULL)
crash(1, 1, "cannot open sam header file with SQ lines [%s]", optarg);
break;
case 46: // sam-header-rg
sam_header_rg = fopen(optarg, "r");
if (sam_header_rg == NULL)
crash(1, 1, "cannot open sam header file with RG lines [%s]", optarg);
break;
case 47: // sam-header-pg
sam_header_pg = fopen(optarg, "r");
if (sam_header_pg == NULL)
crash(1, 1, "cannot open sam header file with PG lines [%s]", optarg);
break;
case 48: // no-autodetect-input
autodetect_input = false;
break;
case 3:
trim_illumina=true;
break;
case 123:
no_qv_check=true;
break;
case 124: // local alignment
Gflag = false;
break;
case 125: // --ignore-qvs
ignore_qvs = true;
break;
#ifdef ENABLE_LOW_QUALITY_FILTER
case 126: //enable-seed-qual-filter
SQFflag = true;
break;
#endif
default:
usage(progname, false);
}
}
argc -= optind;
argv += optind;
if ((pair_mode != PAIR_NONE || !single_reads_file) && (chunk_size % 2) != 0) {
logit(0, "in paired mode or if using options -1 and -2, the thread chunk size must be even; adjusting it to [%d]", chunk_size + 1);
chunk_size++;
}
if (!Gflag && compute_mapping_qualities) {
logit(0, "mapping qualities are not available in local alignment mode; disabling them");
compute_mapping_qualities = false;
}
if (Gflag && gapless_sw) {
fprintf(stderr,"error: cannot use global (or bfast) and ungapped mode at the same time!\n");
usage(progname,false);
}
if (sam_unaligned && !Eflag) {
fprintf(stderr,"error: when using flag --sam-unaligned must also use -E/--sam\n");
usage(progname,false);
}
if (right_reads_filename != NULL || left_reads_filename !=NULL) {
if (right_reads_filename == NULL || left_reads_filename == NULL ){
fprintf(stderr,"error: when using \"%s\" must also specify \"%s\"\n",
(left_reads_filename != NULL) ? "-1" : "-2",
(left_reads_filename != NULL) ? "-2" : "-1");
usage(progname,false);
}
single_reads_file=false;
if (strcmp(right_reads_filename,"-")==0 && strcmp(left_reads_filename,"-")==0) {
fprintf(stderr,"error: both -1 and -2 arguments cannot be stdin (\"-\")\n");
usage(progname,false);
}
}
if (!match_mode_set) {
match_mode = (pair_mode == PAIR_NONE? DEF_MATCH_MODE_UNPAIRED : DEF_MATCH_MODE_PAIRED);
}
if (pair_mode == PAIR_NONE && (!trim_first || !trim_second)) {
fprintf(stderr,"error: cannot use --trim-first or --trim-second in unpaired mode\n");
usage(progname,false);
}
// set up insert size histogram
if (Xflag && pair_mode == PAIR_NONE) {
fprintf(stderr, "warning: insert histogram not available in unpaired mode; ignoring\n");
Xflag = false;
}
if (pair_mode != PAIR_NONE) {
insert_histogram_bucket_size = ceil_div(max_insert_size - min_insert_size + 1, 100);
for (i = 0; i < 100; i++) {
insert_histogram[i] = 1;
insert_histogram_load += insert_histogram[i];
}
}
if(load_file != NULL && n_seeds != 0){
fprintf(stderr,"error: cannot specify seeds when loading genome map\n");
usage(progname,false);
}
if (n_seeds == 0 && load_file == NULL && load_mmap == NULL) {
if (mode_mirna)
load_default_mirna_seeds();
else
load_default_seeds(0);
}
if (Hflag){
init_seed_hash_mask();
}
if (save_file != NULL && load_file != NULL && list_cutoff == DEF_LIST_CUTOFF){
fprintf(stderr,"error: -L and -S allowed together only if list_cutoff is specified\n");
exit(1);
}
if (load_file != NULL && (save_file != NULL || save_mmap != NULL))
{ // args: none
if (argc != 0) {
fprintf(stderr, "error: when using both -L and -S, no extra files can be given\n");
usage(progname, false);
}
}
else if (load_file != NULL || load_mmap != NULL)
{ // args: reads file
if (argc == 0 && single_reads_file) {
fprintf(stderr,"error: read_file not specified\n");
usage(progname, false);
} else if (argc == 1) {
if (single_reads_file) {
reads_filename = argv[0];
} else {
fprintf(stderr,"error: cannot specify a reads file when using -L, -1 and -2\n");
usage(progname,false);
}
}
}
else if (save_file != NULL)
{ // args: genome file(s)
if (argc == 0){
fprintf(stderr, "error: genome_file(s) not specified\n");
usage(progname,false);
}
genome_files = &argv[0];
ngenome_files = argc;
}
else if (single_reads_file)
{ // args: reads file, genome file(s)
if (argc < 2) {
fprintf(stderr, "error: %sgenome_file(s) not specified\n",
(argc == 0) ? "reads_file, " : "");
usage(progname, false);
}
reads_filename = argv[0];
genome_files = &argv[1];
ngenome_files = argc - 1;
}
else
{
if( argc < 1) {
fprintf(stderr, "error: genome_file(s) not specified\n");
usage(progname, false);
}
genome_files = &argv[0];
ngenome_files = argc;
}
if (!Cflag && !Fflag) {
Cflag = Fflag = true;
}
if (pair_mode != PAIR_NONE && (!Cflag || !Fflag)) {
fprintf(stderr, "warning: in paired mode, both strands must be inspected; ignoring -C and -F\n");
Cflag = Fflag = true;
}
/*
if (pair_mode == PAIR_NONE && half_paired) {
fprintf(stderr, "error: cannot use option half-paired in non-paired mode!\n");
exit(1);
}
*/
if (pair_mode == PAIR_NONE && sam_r2) {
fprintf(stderr, "error: cannot use option sam-r2 in non-paired mode!\n");
exit(1);
}
if (shrimp_mode == MODE_LETTER_SPACE) {
sw_vect_threshold = sw_full_threshold;
}
if (Eflag && Pflag) {
fprintf(stderr,"-E and -P are incompatable\n");
exit(1);
}
if (Eflag && Rflag) {
fprintf(stderr,"-E and -R are incompatable\n");
exit(1);
}
if (!valid_spaced_seeds()) {
fprintf(stderr, "error: invalid spaced seed\n");
if (!Hflag)
fprintf(stderr, " for longer seeds, try using the -H flag\n");
exit(1);
}
if (!IS_ABSOLUTE(window_len) && window_len < 100.0) {
fprintf(stderr, "error: window length < 100%% of read length\n");
exit(1);
}
if (!IS_ABSOLUTE(window_overlap) && window_overlap > 100.0) {
fprintf(stderr, "warning: window overlap length > 100%% of window_length; resetting to 100%%\n");
window_overlap = 100.0;
}
if ((pair_mode == PAIR_NONE && (match_mode < 1 || match_mode > 2))
|| (pair_mode != PAIR_NONE && (match_mode < 2 || match_mode > 4))) {
fprintf(stderr, "error: invalid match mode [pair_mode=%d;match_mode=%d]\n", pair_mode, match_mode);
exit(1);
}
if (num_outputs < 1) {
fprintf(stderr, "error: invalid maximum hits per read\n");
exit(1);
}
if (a_gap_open_score > 0 || b_gap_open_score > 0) {
fprintf(stderr, "error: invalid gap open penalty\n");
exit(1);
}
if (a_gap_extend_score > 0 || b_gap_extend_score > 0) {
fprintf(stderr, "error: invalid gap extend penalty\n");
exit(1);
}
if (!IS_ABSOLUTE(sw_full_threshold) && sw_full_threshold > 100.0) {
fprintf(stderr, "error: invalid s-w full hit threshold\n");
exit(1);
}
if (shrimp_mode == MODE_COLOUR_SPACE && !IS_ABSOLUTE(sw_vect_threshold)
&& sw_vect_threshold > 100.0) {
fprintf(stderr, "error: invalid s-w vector threshold\n");
exit(1);
}
if (!IS_ABSOLUTE(window_gen_threshold) && window_gen_threshold > 100.0) {
fprintf(stderr, "error: invalid window generation threshold\n");
exit(1);
}
if ((IS_ABSOLUTE(window_gen_threshold) && IS_ABSOLUTE(sw_full_threshold)
&& -window_gen_threshold > -sw_full_threshold)
||
(!IS_ABSOLUTE(window_gen_threshold) && !IS_ABSOLUTE(sw_full_threshold)
&& window_gen_threshold > sw_full_threshold)) {
//fprintf(stderr, "warning: window generation threshold is larger than sw threshold\n");
}
if ((a_gap_open_set && !b_gap_open_set) || (a_gap_extend_set && !b_gap_extend_set)) {
fputc('\n', stderr);
}
if (a_gap_open_set && !b_gap_open_set) {
fprintf(stderr, "Notice: Gap open penalty set for reference but not query; assuming symmetry.\n");
b_gap_open_score = a_gap_open_score;
}
if (a_gap_extend_set && !b_gap_extend_set) {
fprintf(stderr, "Notice: Gap extend penalty set for reference but not query; assuming symmetry.\n");
b_gap_extend_score = a_gap_extend_score;
}
if ((a_gap_open_set && !b_gap_open_set) || (a_gap_extend_set && !b_gap_extend_set)) {
fputc('\n', stderr);
}
/* Set probabilities from scores */
if (shrimp_mode == MODE_COLOUR_SPACE) {
// CS: pr_xover ~= .03 => alpha => pr_mismatch => rest
pr_xover = .03;
score_alpha = (double)crossover_score / (log(pr_xover/3)/log(2.0));
pr_mismatch = 1.0/(1.0 + 1.0/3.0 * pow(2.0, ((double)match_score - (double)mismatch_score)/score_alpha));
} else {
// LS: pr_mismatch ~= .01 => alpha => rest
pr_mismatch = .01;
score_alpha = ((double)match_score - (double)mismatch_score)/(log((1 - pr_mismatch)/(pr_mismatch/3.0))/log(2.0));
}
score_beta = (double)match_score - 2 * score_alpha - score_alpha * log(1 - pr_mismatch)/log(2.0);
pr_del_open = pow(2.0, (double)a_gap_open_score/score_alpha);
pr_ins_open = pow(2.0, (double)b_gap_open_score/score_alpha);
pr_del_extend = pow(2.0, (double)a_gap_extend_score/score_alpha);
pr_ins_extend = pow(2.0, ((double)b_gap_extend_score - score_beta)/score_alpha);
//score_difference_mq_cutoff = (int)rint(10.0 * score_alpha);
#ifdef DEBUG_SCORES
fprintf(stderr, "probabilities from scores:\talpha=%.9g\tbeta=%.9g\n", score_alpha, score_beta);
if (shrimp_mode == MODE_COLOUR_SPACE)
fprintf(stderr, "pr_xover=%.9g\n", pr_xover);
fprintf(stderr, "pr_mismatch=%.9g\npr_del_open=%.9g\tpr_del_extend=%.9g\t(pr_del1=%.9g)\npr_ins_open=%.9g\tpr_ins_extend=%.9g\t(pr_ins1=%.9g)\n",
pr_mismatch,
pr_del_open, pr_del_extend, pr_del_open*pr_del_extend,
pr_ins_open, pr_ins_extend, pr_ins_open*pr_ins_extend);
// sanity check:
fprintf(stderr, "scores from probabilities:\n");
fprintf(stderr, "match_score=%g\nmismatch_score=%g\n",
2 * score_alpha + score_beta + score_alpha * log(1 - pr_mismatch) / log(2.0),
2 * score_alpha + score_beta + score_alpha * log(pr_mismatch/3) / log(2.0));
if (shrimp_mode == MODE_COLOUR_SPACE)
fprintf(stderr, "crossover_score=%g\n", score_alpha * log(pr_xover/3) / log(2.0));
fprintf(stderr, "a_gap_open_score=%g\ta_gap_extend_score=%g\nb_gap_open_score=%g\tb_gap_extend_score=%g\n",
score_alpha * log(pr_del_open) / log(2.0),
score_alpha * log(pr_del_extend) / log(2.0),
score_alpha * log(pr_ins_open) / log(2.0),
score_alpha * log(pr_ins_extend) / log(2.0) + score_beta);
#endif
// set up new options structure
// THIS SHOULD EVENTUALLY BE MERGED INTO OPTION READING
if (n_unpaired_mapping_options[0] == 0 && n_paired_mapping_options == 0) {
if (pair_mode == PAIR_NONE)
{
n_unpaired_mapping_options[0]++;
//unpaired_mapping_options[0] = (struct read_mapping_options_t *)xcalloc(n_unpaired_mapping_options[0] * sizeof(unpaired_mapping_options[0][0]));
unpaired_mapping_options[0] = (struct read_mapping_options_t *)
my_calloc(n_unpaired_mapping_options[0] * sizeof(unpaired_mapping_options[0][0]),
&mem_small, "unpaired_mapping_options[0]");
unpaired_mapping_options[0][0].regions.recompute = (match_mode == 2 && use_regions);
//unpaired_mapping_options[0][0].regions.min_seed = -1;
//unpaired_mapping_options[0][0].regions.max_seed = -1;
unpaired_mapping_options[0][0].anchor_list.recompute = true;
unpaired_mapping_options[0][0].anchor_list.collapse = true;
unpaired_mapping_options[0][0].anchor_list.use_region_counts = (match_mode == 2 && use_regions);
unpaired_mapping_options[0][0].anchor_list.use_mp_region_counts = 0;
unpaired_mapping_options[0][0].hit_list.recompute = true;
unpaired_mapping_options[0][0].hit_list.gapless = gapless_sw;
unpaired_mapping_options[0][0].hit_list.match_mode = match_mode;
unpaired_mapping_options[0][0].hit_list.threshold = window_gen_threshold;
unpaired_mapping_options[0][0].pass1.recompute = true;
unpaired_mapping_options[0][0].pass1.only_paired = false;
unpaired_mapping_options[0][0].pass1.gapless = gapless_sw;
unpaired_mapping_options[0][0].pass1.min_matches = match_mode; // this is 1 or 2 in unpaired mode
unpaired_mapping_options[0][0].pass1.num_outputs = num_tmp_outputs;
unpaired_mapping_options[0][0].pass1.threshold = sw_vect_threshold;
unpaired_mapping_options[0][0].pass1.window_overlap = window_overlap;
unpaired_mapping_options[0][0].pass2.strata = strata_flag;
unpaired_mapping_options[0][0].pass2.save_outputs = false;
unpaired_mapping_options[0][0].pass2.num_outputs = num_outputs;
unpaired_mapping_options[0][0].pass2.threshold = sw_full_threshold;
unpaired_mapping_options[0][0].pass2.stop_count = 0;
}
else
{
n_paired_mapping_options++;
//paired_mapping_options = (struct readpair_mapping_options_t *)xcalloc(n_paired_mapping_options * sizeof(paired_mapping_options[0]));
paired_mapping_options = (struct readpair_mapping_options_t *)
my_calloc(n_paired_mapping_options * sizeof(paired_mapping_options[0]),
&mem_small, "paired_mapping_options");
paired_mapping_options[0].pairing.pair_mode = pair_mode;
paired_mapping_options[0].pairing.min_insert_size = min_insert_size;
paired_mapping_options[0].pairing.max_insert_size = max_insert_size;
paired_mapping_options[0].pairing.strata = strata_flag;
paired_mapping_options[0].pairing.save_outputs = compute_mapping_qualities;
paired_mapping_options[0].pairing.pass1_num_outputs = num_tmp_outputs;
paired_mapping_options[0].pairing.pass2_num_outputs = num_outputs;
paired_mapping_options[0].pairing.pass1_threshold = sw_vect_threshold;
paired_mapping_options[0].pairing.pass2_threshold = sw_full_threshold;
paired_mapping_options[0].read[0].regions.recompute = use_regions && match_mode != 2;
//paired_mapping_options[0].read[0].regions.min_seed = -1;
//paired_mapping_options[0].read[0].regions.max_seed = -1;
paired_mapping_options[0].read[0].anchor_list.recompute = true;
paired_mapping_options[0].read[0].anchor_list.collapse = true;
paired_mapping_options[0].read[0].anchor_list.use_region_counts = use_regions && match_mode != 2;
if (use_regions) {
paired_mapping_options[0].read[0].anchor_list.use_mp_region_counts = (match_mode == 4 && !half_paired? 1
: match_mode == 3 && half_paired? 2
: match_mode == 3 && !half_paired? 3
: 0);
}
paired_mapping_options[0].read[0].hit_list.recompute = true;
paired_mapping_options[0].read[0].hit_list.gapless = gapless_sw;
paired_mapping_options[0].read[0].hit_list.match_mode = (match_mode == 4? 2
: match_mode == 3? 3
: 1);
paired_mapping_options[0].read[0].hit_list.threshold = window_gen_threshold;
paired_mapping_options[0].read[0].pass1.recompute = true;
paired_mapping_options[0].read[0].pass1.only_paired = true;
paired_mapping_options[0].read[0].pass1.gapless = gapless_sw;
paired_mapping_options[0].read[0].pass1.min_matches = (match_mode == 4? 2 : 1);
paired_mapping_options[0].read[0].pass1.threshold = sw_vect_threshold;
paired_mapping_options[0].read[0].pass1.window_overlap = window_overlap;
paired_mapping_options[0].read[0].pass2.strata = strata_flag;
paired_mapping_options[0].read[0].pass2.threshold = sw_full_threshold * 0.5;
paired_mapping_options[0].read[1] = paired_mapping_options[0].read[0];
if (!half_paired)
{
paired_mapping_options[0].pairing.stop_count = 0;
}
else // half_paired
{
paired_mapping_options[0].pairing.stop_count = 1;
paired_mapping_options[0].pairing.stop_threshold = 101.0; //paired_mapping_options[0].pairing.pass2_threshold;
n_unpaired_mapping_options[0]++;
n_unpaired_mapping_options[1]++;
//unpaired_mapping_options[0] = (struct read_mapping_options_t *)xcalloc(n_unpaired_mapping_options[0] * sizeof(unpaired_mapping_options[0][0]));
unpaired_mapping_options[0] = (struct read_mapping_options_t *)
my_calloc(n_unpaired_mapping_options[0] * sizeof(unpaired_mapping_options[0][0]),
&mem_small, "unpaired_mapping_options[0]");
//unpaired_mapping_options[1] = (struct read_mapping_options_t *)xcalloc(n_unpaired_mapping_options[1] * sizeof(unpaired_mapping_options[1][0]));
unpaired_mapping_options[1] = (struct read_mapping_options_t *)
my_calloc(n_unpaired_mapping_options[1] * sizeof(unpaired_mapping_options[1][0]),
&mem_small, "unpaired_mapping_options[1]");
unpaired_mapping_options[0][0].regions.recompute = false;
unpaired_mapping_options[0][0].anchor_list.recompute = false;
unpaired_mapping_options[0][0].hit_list.recompute = false;
unpaired_mapping_options[0][0].pass1.recompute = true;
unpaired_mapping_options[0][0].pass1.gapless = gapless_sw;
unpaired_mapping_options[0][0].pass1.min_matches = 2;
unpaired_mapping_options[0][0].pass1.only_paired = false;
unpaired_mapping_options[0][0].pass1.num_outputs = num_tmp_outputs;
unpaired_mapping_options[0][0].pass1.threshold = sw_vect_threshold;
unpaired_mapping_options[0][0].pass1.window_overlap = window_overlap;
unpaired_mapping_options[0][0].pass2.strata = strata_flag;
unpaired_mapping_options[0][0].pass2.save_outputs = compute_mapping_qualities;
unpaired_mapping_options[0][0].pass2.num_outputs = num_outputs;
unpaired_mapping_options[0][0].pass2.threshold = sw_full_threshold;
unpaired_mapping_options[0][0].pass2.stop_count = 0;
unpaired_mapping_options[1][0] = unpaired_mapping_options[0][0];
}
}
}
if(load_file == NULL && load_mmap == NULL) {
print_settings();
}
if (load_file != NULL && save_mmap != NULL) {
exit(genome_load_map_save_mmap(load_file, save_mmap) == true ? 0 : 1);
}
before = gettimeinusecs();
if (load_mmap != NULL) {
genome_load_mmap(load_mmap);
} else if (load_file != NULL){
if (strchr(load_file, ',') == NULL) {
//use prefix
int buf_size = strlen(load_file) + 20;
//char * genome_name = (char *)xmalloc(sizeof(char)*buf_size);
char genome_name[buf_size];
strncpy(genome_name,load_file,buf_size);
strncat(genome_name,".genome",buf_size);
fprintf(stderr,"Loading genome from %s\n",genome_name);
if (!load_genome_map(genome_name)){
fprintf(stderr, "error: loading from genome file \"%s\"\n", genome_name);
exit (1);
}
//free(genome_name);
int seed_count = 0;
//char * seed_name = (char *)xmalloc(sizeof(char)*buf_size);
char seed_name[buf_size];
//char * buff = (char *)xmalloc(sizeof(char)*buf_size);
char buff[buf_size];
strncpy(seed_name,load_file,buf_size);
strncat(seed_name,".seed.",buf_size);
sprintf(buff,"%d",seed_count);
strncat(seed_name,buff,buf_size);
FILE *f = fopen(seed_name,"r");
while(f != NULL){
fclose(f);
fprintf(stderr,"Loading seed from %s\n",seed_name);
if (!load_genome_map_seed(seed_name)) {
fprintf(stderr, "error: loading from map file \"%s\"\n", seed_name);
exit (1);
}
seed_count++;
strncpy(seed_name,load_file,buf_size);
strncat(seed_name,".seed.",buf_size);
sprintf(buff,"%d",seed_count);
strncat(seed_name,buff,buf_size);
f = fopen(seed_name,"r");
}
//free(seed_name);
//free(buff);
} else {
c = strtok(load_file, ",");
fprintf(stderr,"Loading genome from %s\n",c);
if (!load_genome_map(c)){
fprintf(stderr, "error: loading from genome file \"%s\"\n", c);
exit (1);
}
c = strtok(NULL, ",");
do {
fprintf(stderr,"Loading seed from %s\n",c);
if (!load_genome_map_seed(c)) {
fprintf(stderr, "error: loading from map file \"%s\"\n", c);
exit (1);
}
c = strtok(NULL, ",");
} while (c != NULL);
}
if (Hflag) {
init_seed_hash_mask();
}
//print_settings();
} else {
if (!load_genome(genome_files,ngenome_files)){
exit(1);
}
}
load_genome_usecs += (gettimeinusecs() - before);
// initialize general search tree for contig offsets
gen_st_init(&contig_offsets_gen_st, 17, contig_offsets, num_contigs);
//
// Automatic genome index trimming
//
if (Vflag && save_file == NULL && list_cutoff == DEF_LIST_CUTOFF) {
// this will be a mapping job; enable automatic trimming
long long unsigned int total_genome_len = 0;
int max_seed_weight = 0;
for (i = 0; i < num_contigs; i++) {
total_genome_len += (long long unsigned int)genome_len[i];
}
if (Hflag) {
max_seed_weight = HASH_TABLE_POWER;
} else {
for (sn = 0; sn < n_seeds; sn++) {
if (seed[sn].weight > max_seed_weight) {
max_seed_weight = seed[sn].weight;
}
}
}
// cutoff := max (1000, 100*(total_genome_len/4^max_seed_weight))
list_cutoff = 1000;
if ((uint32_t)((100llu * total_genome_len)/power(4, max_seed_weight)) > list_cutoff) {
list_cutoff = (uint32_t)((100llu * total_genome_len)/power(4, max_seed_weight));
}
//fprintf(stderr, " %-40s%d\n", "Trimming index lists longer than:", list_cutoff);
//fprintf(stderr, "\n");
}
if (load_file != NULL || load_mmap != NULL) {
print_settings();
}
if (Yflag)
print_genomemap_stats();
if (save_file != NULL) {
if (list_cutoff != DEF_LIST_CUTOFF) {
fprintf(stderr, "\nTrimming index lists longer than: %u\n", list_cutoff);
trim_genome();
}
fprintf(stderr,"Saving genome map to %s\n",save_file);
if(!save_genome_map(save_file)){
exit(1);
}
exit(0);
}
// compute total genome size
for (cn = 0; cn < num_contigs; cn++)
total_genome_size += genome_len[cn];
//TODO setup need max window and max read len
//int longest_read_len = 2000;
int max_window_len = (int)abs_or_pct(window_len,longest_read_len);
// open input files, and set Qflag accordingly
if (single_reads_file) {
fasta = fasta_open(reads_filename, shrimp_mode, Qflag, autodetect_input? &Qflag : NULL);
if (fasta == NULL) {
crash(1, 1, "failed to open read file [%s]", reads_filename);
} else {
fprintf(stderr, "- Processing read file [%s]\n", reads_filename);
}
} else {
left_fasta = fasta_open(left_reads_filename, shrimp_mode, Qflag, autodetect_input? &Qflag : NULL);
if (left_fasta == NULL) {
crash(1, 1, "failed to open read file [%s]", left_reads_filename);
}
right_fasta = fasta_open(right_reads_filename, shrimp_mode, Qflag);
if (right_fasta == NULL) {
crash(1, 1, "failed to open read file [%s]", right_reads_filename);
}
// WHY? the code above sets both ->space to shrimp_mode anyhow
//if (right_fasta->space != left_fasta->space) {
// fprintf(stderr,"error: when using -1 and -2, both files must be either only colour space or only letter space!\n");
// return (false);
//}
fasta = left_fasta;
fprintf(stderr, "- Processing read files [%s , %s]\n", left_reads_filename, right_reads_filename);
}
// set default quality value delta
if (Qflag) {
if (!qual_delta_set) {
if (shrimp_mode == MODE_LETTER_SPACE)
qual_delta = DEF_LS_QUAL_DELTA;
else
qual_delta = DEF_CS_QUAL_DELTA;
logit(0, "quality value format not set explicitly; using PHRED+%d", qual_delta);
} else {
logit(0, "quality value format set to PHRED+%d", qual_delta);
}
}
#pragma omp parallel shared(longest_read_len,max_window_len,a_gap_open_score, a_gap_extend_score, b_gap_open_score, b_gap_extend_score,\
match_score, mismatch_score,shrimp_mode,crossover_score,anchor_width) num_threads(num_threads)
{
// init thread-private globals
memset(&tpg, 0, sizeof(tpg_t));
tpg.wait_tc.type = DEF_FAST_TIME_COUNTER;
tpg.region_counts_tc.type = DEF_FAST_TIME_COUNTER;
tpg.mp_region_counts_tc.type = DEF_FAST_TIME_COUNTER;
tpg.anchor_list_tc.type = DEF_FAST_TIME_COUNTER;
tpg.hit_list_tc.type = DEF_FAST_TIME_COUNTER;
tpg.get_vector_hits_tc.type = DEF_FAST_TIME_COUNTER;
tpg.pass1_tc.type = DEF_FAST_TIME_COUNTER;
tpg.pass2_tc.type = DEF_FAST_TIME_COUNTER;
tpg.duplicate_removal_tc.type = DEF_FAST_TIME_COUNTER;
/* region handling */
if (use_regions) {
region_map_id = 0;
for (int number_in_pair = 0; number_in_pair < 2; number_in_pair++)
for (int st = 0; st < 2; st++)
//region_map[number_in_pair][st] = (int32_t *)xcalloc(n_regions * sizeof(region_map[0][0][0]));
region_map[number_in_pair][st] = (region_map_t *)
my_calloc(n_regions * sizeof(region_map[0][0][0]),
&mem_mapping, "region_map");
}
if (f1_setup(max_window_len, longest_read_len,
a_gap_open_score, a_gap_extend_score, b_gap_open_score, b_gap_extend_score,
match_score, shrimp_mode == MODE_LETTER_SPACE? mismatch_score : match_score + crossover_score,
shrimp_mode == MODE_COLOUR_SPACE, true)) {
fprintf(stderr, "failed to initialise vector Smith-Waterman (%s)\n", strerror(errno));
exit(1);
}
int ret;
if (shrimp_mode == MODE_COLOUR_SPACE) {
/* XXX - a vs. b gap */
ret = sw_full_cs_setup(max_window_len, longest_read_len,
a_gap_open_score, a_gap_extend_score, b_gap_open_score, b_gap_extend_score,
match_score, mismatch_score,
crossover_score, true, anchor_width, indel_taboo_len);
} else {
ret = sw_full_ls_setup(max_window_len, longest_read_len,
a_gap_open_score, a_gap_extend_score, b_gap_open_score, b_gap_extend_score,
match_score, mismatch_score, true, anchor_width);
}
if (ret) {
fprintf(stderr, "failed to initialise scalar Smith-Waterman (%s)\n", strerror(errno));
exit(1);
}
/* post_sw */
if (shrimp_mode == MODE_COLOUR_SPACE) {
post_sw_setup(max_window_len + longest_read_len,
pr_mismatch, pr_xover, pr_del_open, pr_del_extend, pr_ins_open, pr_ins_extend,
Qflag && !ignore_qvs, use_sanger_qvs, qual_vector_offset, qual_delta, true);
}
}
char * output;
if (Eflag){
int i;
if (sam_header_filename != NULL) {
FILE * sam_header_file = fopen(sam_header_filename, "r");
if (sam_header_file == NULL) {
perror("Failed to open sam header file ");
exit(1);
}
cat(sam_header_file, stdout);
fclose(sam_header_file);
} else {
// HD line
if (sam_header_hd != NULL) {
cat(sam_header_hd, stdout);
} else {
fprintf(stdout,"@HD\tVN:%s\tSO:%s\n","1.0","unsorted");
}
// SQ lines
if (sam_header_sq != NULL) {
cat(sam_header_sq, stdout);
} else {
for(i = 0; i < num_contigs; i++){
fprintf(stdout,"@SQ\tSN:%s\tLN:%u\n",contig_names[i],genome_len[i]);
}
}
// RG lines
if (sam_header_rg != NULL) {
cat(sam_header_rg, stdout);
} else if (sam_read_group_name != NULL) {
fprintf(stdout, "@RG\tID:%s\tSM:%s\n", sam_read_group_name, sam_sample_name);
}
// PG lines
if (sam_header_pg != NULL) {
cat(sam_header_pg, stdout);
} else {
fprintf(stdout, "@PG\tID:%s\tVN:%s\tCL:%s\n", "gmapper", SHRIMP_VERSION_STRING, command_line);
}
}
} else {
output = output_format_line(Rflag);
puts(output);
free(output);
}
before = gettimeinusecs();
bool launched = launch_scan_threads(fasta, left_fasta, right_fasta);
if (!launched) {
fprintf(stderr,"error: a fatal error occured while launching scan thread(s)!\n");
exit(1);
}
mapping_wallclock_usecs += (gettimeinusecs() - before);
if (single_reads_file) {
fasta_close(fasta);
} else {
fasta_close(left_fasta);
fasta_close(right_fasta);
}
print_statistics();
#pragma omp parallel shared(longest_read_len,max_window_len,a_gap_open_score, a_gap_extend_score, b_gap_open_score, b_gap_extend_score, \
match_score, mismatch_score,shrimp_mode,crossover_score,anchor_width) num_threads(num_threads)
{
sw_vector_cleanup();
if (shrimp_mode==MODE_COLOUR_SPACE) {
sw_full_cs_cleanup();
post_sw_cleanup();
}
sw_full_ls_cleanup();
f1_free();
if (use_regions) {
for (int number_in_pair = 0; number_in_pair < 2; number_in_pair++)
for (int st = 0; st < 2; st++)
//free(region_map[number_in_pair][st]);
my_free(region_map[number_in_pair][st], n_regions * sizeof(region_map[0][0][0]),
&mem_mapping, "region_map");
}
}
gen_st_delete(&contig_offsets_gen_st);
if (load_mmap != NULL) {
// munmap?
} else {
free_genome();
//free(seed);
if (Hflag) {
int sn;
for (sn = 0; sn < n_seeds; sn++) {
my_free(seed_hash_mask[sn], BPTO32BW(max_seed_span) * sizeof(seed_hash_mask[sn][0]),
&mem_small, "seed_hash_mask[%d]", sn);
}
my_free(seed_hash_mask, n_seeds * sizeof(seed_hash_mask[0]),
&mem_small, "seed_hash_mask");
}
my_free(seed, n_seeds * sizeof(seed[0]),
&mem_small, "seed");
}
//free(paired_mapping_options);
my_free(paired_mapping_options, n_paired_mapping_options * sizeof(paired_mapping_options[0]),
&mem_small, "paired_mapping_options");
//free(unpaired_mapping_options[0]);
my_free(unpaired_mapping_options[0], n_unpaired_mapping_options[0] * sizeof(unpaired_mapping_options[0][0]),
&mem_small, "unpaired_mapping_options[0]");
//free(unpaired_mapping_options[1]);
my_free(unpaired_mapping_options[1], n_unpaired_mapping_options[1] * sizeof(unpaired_mapping_options[0][0]),
&mem_small, "unpaired_mapping_options[1]");
if (sam_read_group_name != NULL)
free(sam_read_group_name);
// close some files
if (aligned_reads_file != NULL)
fclose(aligned_reads_file);
if (unaligned_reads_file != NULL)
fclose(unaligned_reads_file);
if (sam_header_hd != NULL)
fclose(sam_header_hd);
if (sam_header_sq != NULL)
fclose(sam_header_sq);
if (sam_header_rg != NULL)
fclose(sam_header_rg);
if (sam_header_pg != NULL)
fclose(sam_header_pg);
#ifdef MYALLOC_ENABLE_CRT
fprintf(stderr, "crt_mem: %lld\n", (long long)crt_mem);
#endif
#ifndef NDEBUG
fprintf(stderr, "mem_genomemap: max=%lld crt=%lld\n", (long long)count_get_max(&mem_genomemap), (long long)count_get_count(&mem_genomemap));
fprintf(stderr, "mem_mapping: max=%lld crt=%lld\n", (long long)count_get_max(&mem_mapping), (long long)count_get_count(&mem_mapping));
fprintf(stderr, "mem_thread_buffer: max=%lld crt=%lld\n", (long long)count_get_max(&mem_thread_buffer), (long long)count_get_count(&mem_thread_buffer));
fprintf(stderr, "mem_small: max=%lld crt=%lld\n", (long long)count_get_max(&mem_small), (long long)count_get_count(&mem_small));
fprintf(stderr, "mem_sw: max=%lld crt=%lld\n", (long long)count_get_max(&mem_sw), (long long)count_get_count(&mem_sw));
#endif
return 0;
}
|
yescrypt-opt_c.h | /*-
* Copyright 2009 Colin Percival
* Copyright 2013,2014 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
#ifdef __i386__
#warning "This implementation does not use SIMD, and thus it runs a lot slower than the SIMD-enabled implementation. Enable at least SSE2 in the C compiler and use yescrypt-best.c instead unless you're building this SIMD-less implementation on purpose (portability to older CPUs or testing)."
#elif defined(__x86_64__)
#warning "This implementation does not use SIMD, and thus it runs a lot slower than the SIMD-enabled implementation. Use yescrypt-best.c instead unless you're building this SIMD-less implementation on purpose (for testing only)."
#endif
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include "sha256.h"
#include "sysendian.h"
#include "yescrypt.h"
#include "yescrypt-platform_c.h"
/*
 * blkcpy(dest, src, count):
 * Copy count 64-bit words from src to dest.  count must be a positive
 * multiple of 4 (all callers pass multiples of 8).
 */
static inline void
blkcpy(uint64_t * dest, const uint64_t * src, size_t count)
{
	while (count) {
		dest[0] = src[0];
		dest[1] = src[1];
		dest[2] = src[2];
		dest[3] = src[3];
		dest += 4;
		src += 4;
		count -= 4;
	}
}
/*
 * blkxor(dest, src, count):
 * XOR count 64-bit words of src into dest.  count must be a positive
 * multiple of 4 (all callers pass multiples of 8).
 */
static inline void
blkxor(uint64_t * dest, const uint64_t * src, size_t count)
{
	while (count) {
		dest[0] ^= src[0];
		dest[1] ^= src[1];
		dest[2] ^= src[2];
		dest[3] ^= src[3];
		dest += 4;
		src += 4;
		count -= 4;
	}
}
/*
 * One 64-byte Salsa20 block, addressable either as sixteen 32-bit words
 * (canonical Salsa20 layout) or as eight 64-bit words (the shuffled
 * layout produced by salsa20_simd_shuffle()).
 */
typedef union {
	uint32_t w[16];
	uint64_t d[8];
} salsa20_blk_t;
/*
 * salsa20_simd_shuffle(Bin, Bout):
 * Interleave the sixteen 32-bit words of Bin into eight 64-bit words in
 * Bout, using the fixed permutation that salsa20_8() operates on.
 */
static inline void
salsa20_simd_shuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout)
{
	/* Pack word in1*2 into the low half and word in2*2+1 into the
	 * high half of output word `out'. */
#define COMBINE(out, in1, in2) \
	Bout->d[out] = Bin->w[in1 * 2] | ((uint64_t)Bin->w[in2 * 2 + 1] << 32);
	COMBINE(0, 0, 2)
	COMBINE(1, 5, 7)
	COMBINE(2, 2, 4)
	COMBINE(3, 7, 1)
	COMBINE(4, 4, 6)
	COMBINE(5, 1, 3)
	COMBINE(6, 6, 0)
	COMBINE(7, 3, 5)
#undef COMBINE
}
/*
 * salsa20_simd_unshuffle(Bin, Bout):
 * Inverse of salsa20_simd_shuffle(): split each 64-bit word of Bin back
 * into its two 32-bit halves and restore canonical Salsa20 word order.
 */
static inline void
salsa20_simd_unshuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout)
{
	/* Output word out*2 takes the (implicitly truncated) low half of
	 * d[in1]; out*2+1 takes the high half of d[in2]. */
#define COMBINE(out, in1, in2) \
	Bout->w[out * 2] = Bin->d[in1]; \
	Bout->w[out * 2 + 1] = Bin->d[in2] >> 32;
	COMBINE(0, 0, 6)
	COMBINE(1, 5, 3)
	COMBINE(2, 2, 0)
	COMBINE(3, 7, 5)
	COMBINE(4, 4, 2)
	COMBINE(5, 1, 7)
	COMBINE(6, 6, 4)
	COMBINE(7, 3, 1)
#undef COMBINE
}
/**
* salsa20_8(B):
* Apply the salsa20/8 core to the provided block.
*/
static void
salsa20_8(uint64_t B[8])
{
	size_t i;
	salsa20_blk_t X;
	/* Operate on the 32-bit word view of the scratch block. */
#define x X.w

	/* B is in the shuffled layout; restore canonical word order first. */
	salsa20_simd_unshuffle((const salsa20_blk_t *)B, &X);

	/* 8 rounds = 4 double-rounds (a column round then a row round). */
	for (i = 0; i < 8; i += 2) {
#define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
		/* Operate on columns */
		x[ 4] ^= R(x[ 0]+x[12], 7);  x[ 8] ^= R(x[ 4]+x[ 0], 9);
		x[12] ^= R(x[ 8]+x[ 4],13);  x[ 0] ^= R(x[12]+x[ 8],18);

		x[ 9] ^= R(x[ 5]+x[ 1], 7);  x[13] ^= R(x[ 9]+x[ 5], 9);
		x[ 1] ^= R(x[13]+x[ 9],13);  x[ 5] ^= R(x[ 1]+x[13],18);

		x[14] ^= R(x[10]+x[ 6], 7);  x[ 2] ^= R(x[14]+x[10], 9);
		x[ 6] ^= R(x[ 2]+x[14],13);  x[10] ^= R(x[ 6]+x[ 2],18);

		x[ 3] ^= R(x[15]+x[11], 7);  x[ 7] ^= R(x[ 3]+x[15], 9);
		x[11] ^= R(x[ 7]+x[ 3],13);  x[15] ^= R(x[11]+x[ 7],18);

		/* Operate on rows */
		x[ 1] ^= R(x[ 0]+x[ 3], 7);  x[ 2] ^= R(x[ 1]+x[ 0], 9);
		x[ 3] ^= R(x[ 2]+x[ 1],13);  x[ 0] ^= R(x[ 3]+x[ 2],18);

		x[ 6] ^= R(x[ 5]+x[ 4], 7);  x[ 7] ^= R(x[ 6]+x[ 5], 9);
		x[ 4] ^= R(x[ 7]+x[ 6],13);  x[ 5] ^= R(x[ 4]+x[ 7],18);

		x[11] ^= R(x[10]+x[ 9], 7);  x[ 8] ^= R(x[11]+x[10], 9);
		x[ 9] ^= R(x[ 8]+x[11],13);  x[10] ^= R(x[ 9]+x[ 8],18);

		x[12] ^= R(x[15]+x[14], 7);  x[13] ^= R(x[12]+x[15], 9);
		x[14] ^= R(x[13]+x[12],13);  x[15] ^= R(x[14]+x[13],18);
#undef R
	}
#undef x

	{
		/* Feed-forward: re-shuffle the round output and add it to
		 * the original (still-shuffled) input block in place. */
		salsa20_blk_t Y;
		salsa20_simd_shuffle(&X, &Y);
		for (i = 0; i < 16; i += 4) {
			((salsa20_blk_t *)B)->w[i] += Y.w[i];
			((salsa20_blk_t *)B)->w[i + 1] += Y.w[i + 1];
			((salsa20_blk_t *)B)->w[i + 2] += Y.w[i + 2];
			((salsa20_blk_t *)B)->w[i + 3] += Y.w[i + 3];
		}
	}
}
/**
* blockmix_salsa8(Bin, Bout, X, r):
* Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
* bytes in length; the output Bout must also be the same size. The
* temporary space X must be 64 bytes.
*/
static void
blockmix_salsa8(const uint64_t * Bin, uint64_t * Bout, uint64_t * X, size_t r)
{
	size_t pair;

	/* 1: X <-- B_{2r - 1} */
	blkcpy(X, &Bin[(2 * r - 1) * 8], 8);

	/* 2: for i = 0 to 2r - 1 do -- processed as r even/odd pairs.
	 * Even-index outputs land in the first half of Bout, odd-index
	 * outputs in the second half. */
	for (pair = 0; pair < r; pair++) {
		/* 3: X <-- H(X \xor B_{2*pair}) */
		blkxor(X, &Bin[pair * 16], 8);
		salsa20_8(X);

		/* 4/6: Y_{2*pair} goes to Bout[pair] */
		blkcpy(&Bout[pair * 8], X, 8);

		/* 3: X <-- H(X \xor B_{2*pair+1}) */
		blkxor(X, &Bin[pair * 16 + 8], 8);
		salsa20_8(X);

		/* 4/6: Y_{2*pair+1} goes to Bout[r + pair] */
		blkcpy(&Bout[pair * 8 + r * 8], X, 8);
	}
}
/* These are tunable */
#define S_BITS 8	/* log2 of the number of entries in each S-box */
#define S_SIMD 2	/* 64-bit lanes per pwxform sub-block */
#define S_P 4		/* parallel pwxform lanes per block */
#define S_ROUNDS 6	/* pwxform rounds applied to each lane */

/* Number of S-boxes. Not tunable, hard-coded in a few places. */
#define S_N 2

/* Derived values. Not tunable on their own. */
#define S_SIZE1 (1 << S_BITS)			/* entries per S-box */
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)	/* byte-offset mask into one S-box */
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)	/* S_MASK replicated in both 32-bit halves */
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD)	/* total 64-bit words across all S-boxes */
#define S_P_SIZE (S_P * S_SIMD)			/* 64-bit words per pwxform block */
#define S_MIN_R ((S_P * S_SIMD + 15) / 16)	/* minimum scrypt r for one pwxform block to fit */
/**
* pwxform(B):
* Transform the provided block using the provided S-boxes.
*/
static void
block_pwxform(uint64_t * B, const uint64_t * S)
{
	/* View B as S_P lanes of S_SIMD 64-bit words each. */
	uint64_t (*X)[S_SIMD] = (uint64_t (*)[S_SIMD])B;
	const uint8_t *S0 = (const uint8_t *)S;
	const uint8_t *S1 = (const uint8_t *)(S + S_SIZE1 * S_SIMD);
	size_t i, j;
#if S_SIMD > 2
	size_t k;
#endif

	for (j = 0; j < S_P; j++) {
		uint64_t *Xj = X[j];
		uint64_t x0 = Xj[0];
#if S_SIMD > 1
		uint64_t x1 = Xj[1];
#endif

		for (i = 0; i < S_ROUNDS; i++) {
			/* Both S-box byte offsets are derived from the first
			 * SIMD lane (x0): low half indexes S0, high half S1. */
			uint64_t x = x0 & S_MASK2;
			const uint64_t *p0, *p1;

			p0 = (const uint64_t *)(S0 + (uint32_t)x);
			p1 = (const uint64_t *)(S1 + (x >> 32));

			/* 32x32 -> 64 multiply of the two halves, then add
			 * one S-box entry and XOR the other. */
			x0 = (uint64_t)(x0 >> 32) * (uint32_t)x0;
			x0 += p0[0];
			x0 ^= p1[0];

#if S_SIMD > 1
			x1 = (uint64_t)(x1 >> 32) * (uint32_t)x1;
			x1 += p0[1];
			x1 ^= p1[1];
#endif

#if S_SIMD > 2
			/* Remaining lanes stay in memory (no named temps). */
			for (k = 2; k < S_SIMD; k++) {
				x = Xj[k];

				x = (uint64_t)(x >> 32) * (uint32_t)x;
				x += p0[k];
				x ^= p1[k];

				Xj[k] = x;
			}
#endif
		}

		Xj[0] = x0;
#if S_SIMD > 1
		Xj[1] = x1;
#endif
	}
}
/**
* blockmix_pwxform(Bin, Bout, S, r):
* Compute Bout = BlockMix_pwxform{salsa20/8, S, r}(Bin). The input Bin must
* be 128r bytes in length; the output Bout must also be the same size.
*
* S lacks const qualifier to match blockmix_salsa8()'s prototype, which we
* need to refer to both functions via the same function pointers.
*/
static void
blockmix_pwxform(const uint64_t * Bin, uint64_t * Bout, uint64_t * S, size_t r)
{
	size_t r1, r2, i;

	/* Convert 128-byte blocks to (S_P_SIZE * 64-bit) blocks */
	r1 = r * 128 / (S_P_SIZE * 8);

	/* X <-- B_{r1 - 1} */
	blkcpy(Bout, &Bin[(r1 - 1) * S_P_SIZE], S_P_SIZE);

	/* X <-- X \xor B_i */
	blkxor(Bout, Bin, S_P_SIZE);

	/* X <-- H'(X) */
	/* B'_i <-- X */
	block_pwxform(Bout, S);

	/* for i = 0 to r1 - 1 do */
	/* Each output block is chained off the previous one (copy, xor
	 * in the input block, then pwxform in place). */
	for (i = 1; i < r1; i++) {
		/* X <-- X \xor B_i */
		blkcpy(&Bout[i * S_P_SIZE], &Bout[(i - 1) * S_P_SIZE],
		    S_P_SIZE);
		blkxor(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE], S_P_SIZE);

		/* X <-- H'(X) */
		/* B'_i <-- X */
		block_pwxform(&Bout[i * S_P_SIZE], S);
	}

	/* Handle partial blocks */
	/* If 128r is not a multiple of the pwxform block size, the tail
	 * is passed through unmixed by pwxform (salsa below covers it). */
	if (i * S_P_SIZE < r * 16)
		blkcpy(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE],
		    r * 16 - i * S_P_SIZE);

	/* Index of the 64-byte block containing the last pwxform output. */
	i = (r1 - 1) * S_P_SIZE / 8;

	/* Convert 128-byte blocks to 64-byte blocks */
	r2 = r * 2;

	/* B'_i <-- H(B'_i) */
	salsa20_8(&Bout[i * 8]);
	i++;

	for (; i < r2; i++) {
		/* B'_i <-- H(B'_i \xor B'_{i-1}) */
		blkxor(&Bout[i * 8], &Bout[(i - 1) * 8], 8);
		salsa20_8(&Bout[i * 8]);
	}
}
/**
* integerify(B, r):
* Return the result of parsing B_{2r-1} as a little-endian integer.
*/
static inline uint64_t
integerify(const uint64_t * B, size_t r)
{
	/*
	 * The 64-bit words are in host byte order, but the block is in the
	 * SIMD-shuffled layout: the low 32 bits of B_{2r-1} live in word 0
	 * and its second 32-bit word lives in the high half of word 6.
	 */
	const uint64_t * last = &B[(2 * r - 1) * 8];
	uint64_t result = (uint64_t)(uint32_t)last[0];

	result |= last[6] & 0xffffffff00000000ULL;
	return result;
}
/**
* smix1(B, r, N, flags, V, NROM, shared, XY, S):
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 256r + 64 bytes in length. The value N must be even and
* no smaller than 2.
*/
static void
smix1(uint64_t * B, size_t r, uint64_t N, yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	/* Use the pwxform block mixer when S-boxes are provided, the plain
	 * salsa20/8 mixer otherwise. */
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 16 * r;	/* 64-bit words per 128r-byte block */
	uint64_t * X = V;	/* X initially aliases V_0 */
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t n, i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	/* Decode little-endian words and shuffle into the SIMD layout,
	 * writing the result straight into V_0. */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	/* 4: X <-- H(X) */
	/* 3: V_i <-- X */
	blockmix(X, Y, Z, r);
	blkcpy(&V[s], Y, s);

	/* From here on X points at scratch space, not into V. */
	X = XY;

	if (NROM && (VROM_mask & 1)) {
		/* ROM-enabled variant: on mask-selected iterations, mix in a
		 * block from the read-only VROM instead of wrapping into V. */
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);

			/* X <-- H(X \xor VROM_j) */
			blkxor(Y, &VROM[j * s], s);
		}

		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			/* n tracks p2floor(i) for the Wrap() computation. */
			if ((i & (i - 1)) == 0)
				n <<= 1;

			/* j <-- Wrap(Integerify(X), i) */
			j = integerify(X, r) & (n - 1);
			j += i - n;

			/* X <-- X \xor V_j */
			blkxor(X, &V[j * s], s);

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i + 1 - n;

				/* X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
			}
			blockmix(Y, X, Z, r);
		}
	} else {
		yescrypt_flags_t rw = flags & YESCRYPT_RW;

		/* 4: X <-- H(X) */
		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			if (rw) {
				/* n tracks p2floor(i) for Wrap(). */
				if ((i & (i - 1)) == 0)
					n <<= 1;

				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(X, r) & (n - 1);
				j += i - n;

				/* X <-- X \xor V_j */
				blkxor(X, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			if (rw) {
				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(Y, r) & (n - 1);
				j += (i + 1) - n;

				/* X <-- X \xor V_j */
				blkxor(Y, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(Y, X, Z, r);
		}
	}

	/* B' <-- X */
	/* Unshuffle and re-encode little-endian back into B. */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}
/**
* smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 256r + 64 bytes in length. The value N must be a
* power of 2 greater than 1. The value Nloop must be even.
*/
static void
smix2(uint64_t * B, size_t r, uint64_t N, uint64_t Nloop,
    yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	/* Use the pwxform block mixer when S-boxes are provided, the plain
	 * salsa20/8 mixer otherwise. */
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	/* The low bit is forced on so the ((i + 1) & VROM_mask) == 1 test
	 * below can trigger. */
	uint32_t VROM_mask = shared->mask1 | 1;
	size_t s = 16 * r;	/* 64-bit words per 128r-byte block */
	yescrypt_flags_t rw = flags & YESCRYPT_RW;
	uint64_t * X = XY;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t i, j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	/* Decode little-endian words and shuffle into the SIMD layout. */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	if (NROM) {
		/* ROM-enabled variant: on mask-selected iterations, read a
		 * block from VROM instead of V. */
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* 7: j <-- Integerify(X) mod N */
				j &= N - 1;

				/* 8: X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
				/* V_j <-- Xprev \xor V_j */
				if (rw)
					blkcpy(&V[j * s], Y, s);
			}
			blockmix(Y, X, Z, r);
		}
	} else {
		/* 6: for i = 0 to N - 1 do */
		/* Nloop is even, so process two blockmixes per iteration. */
		i = Nloop / 2;
		do {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			/* 7: j <-- Integerify(X) mod N */
			j = integerify(Y, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(Y, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], Y, s);
			blockmix(Y, X, Z, r);
		} while (--i);
	}

	/* 10: B' <-- X */
	/* Unshuffle and re-encode little-endian back into B. */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}
/**
* p2floor(x):
* Largest power of 2 not greater than argument.
*/
static uint64_t
p2floor(uint64_t x)
{
	/* Repeatedly clear the lowest set bit; the value held just before
	 * x reaches zero is the highest set bit, i.e. the power-of-2 floor.
	 * Returns 0 for x == 0. */
	uint64_t prev;

	do {
		prev = x;
		x &= x - 1;
	} while (x);

	return prev;
}
/**
* smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
* temporary storage V must be 128rN bytes in length; the temporary storage
* XY must be 256r+64 or (256r+64)*p bytes in length (the larger size is
* required with OpenMP-enabled builds). The value N must be a power of 2
* greater than 1.
*/
static void
smix(uint64_t * B, size_t r, uint64_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	size_t s = 16 * r;	/* 64-bit words per 128r-byte block */
	uint64_t Nchunk = N / p, Nloop_all, Nloop_rw;
	uint32_t i;

	/* Derive the smix2 iteration counts from the time parameter t. */
	Nloop_all = Nchunk;
	if (flags & YESCRYPT_RW) {
		if (t <= 1) {
			if (t)
				Nloop_all *= 2; /* 2/3 */
			Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
		} else {
			Nloop_all *= t - 1;
		}
	} else if (t) {
		if (t == 1)
			Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
		Nloop_all *= t;
	}

	/* Nloop_rw is the read-write portion done per-thread on its own
	 * V chunk; the remainder runs read-only over all of V. */
	Nloop_rw = 0;
	if (flags & __YESCRYPT_INIT_SHARED)
		Nloop_rw = Nloop_all;
	else if (flags & YESCRYPT_RW)
		Nloop_rw = Nloop_all / p;

	Nchunk &= ~(uint64_t)1; /* round down to even */
	Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
	Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
	{
#pragma omp for
#endif
	/* Phase 1: each of the p lanes fills and mixes its own V chunk. */
	for (i = 0; i < p; i++) {
		uint64_t Vchunk = i * Nchunk;
		uint64_t * Bp = &B[i * s];
		uint64_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
		uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
		uint64_t * XYp = XY;
#endif
		/* The last lane absorbs the rounding remainder of N / p. */
		uint64_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
		uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
		/* Initialize this lane's S-boxes (plain salsa mixing). */
		if (Sp)
			smix1(Bp, 1, S_SIZE_ALL / 16,
			    flags & ~YESCRYPT_PWXFORM,
			    Sp, NROM, shared, XYp, NULL);
		if (!(flags & __YESCRYPT_INIT_SHARED_2))
			smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
		smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
		    NROM, shared, XYp, Sp);
	}

	/* Phase 2: remaining iterations run read-only over the whole V. */
	if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
		for (i = 0; i < p; i++) {
			uint64_t * Bp = &B[i * s];
#ifdef _OPENMP
			uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
			uint64_t * XYp = XY;
#endif
			uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
			smix2(Bp, r, N, Nloop_all - Nloop_rw,
			    flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
		}
	}
#ifdef _OPENMP
	}
#endif
}
/**
* yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
* N, r, p, t, flags, buf, buflen):
* Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
* p, buflen), or a revision of scrypt as requested by flags and shared, and
* write the result into buf. The parameters r, p, and buflen must satisfy
* r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power
* of 2 greater than 1.
*
* t controls computation time while not affecting peak memory usage. shared
* and flags may request special modes as described in yescrypt.h. local is
* the thread-local data structure, allowing to preserve and reuse a memory
* allocation across calls, thereby reducing its overhead.
*
* Return 0 on success; or -1 on error.
*/
static int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	yescrypt_region_t tmp;
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint64_t * B, * V, * XY, * S;
	uint64_t sha256[4];	/* 32-byte pre-hash / ClientKey scratch */

	/*
	 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
	 * so don't let it have side-effects.  Without this adjustment, it'd
	 * enable the SHA-256 password pre-hashing and output post-hashing,
	 * because any deviation from classic scrypt implies those.
	 */
	if (p == 1)
		flags &= ~YESCRYPT_PARALLEL_SMIX;

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	/* N must be a power of 2 greater than 1. */
	if (((N & (N - 1)) != 0) || (N <= 1) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 1)) {
		errno = EINVAL;
		return -1;
	}
#if S_MIN_R > 1
	if ((flags & YESCRYPT_PWXFORM) && (r < S_MIN_R)) {
		errno = EINVAL;
		return -1;
	}
#endif
	/* Guard the allocation-size arithmetic below against overflow. */
	if ((p > SIZE_MAX / ((size_t)256 * r + 64)) ||
#if SIZE_MAX / 256 <= UINT32_MAX
	    (r > SIZE_MAX / 256) ||
#endif
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
	if (N > UINT64_MAX / ((uint64_t)t + 1)) {
		errno = EFBIG;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / (S_SIZE_ALL * sizeof(*S))) {
		errno = ENOMEM;
		return -1;
	}

	/* If a ROM is attached, derive its block count and validate it. */
	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 1) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* ROM initialization: V lives in the caller's region. */
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (uint64_t *)local->aligned;
		need = 0;
	}
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size) {
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r + 64;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size) {
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL * sizeof(*S);
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint64_t *)tmp.aligned;
		XY = (uint64_t *)((uint8_t *)B + B_size);
	} else {
		/* Single region carved up as B | V | XY [| S]. */
		init_region(&tmp);
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		B = (uint64_t *)local->aligned;
		V = (uint64_t *)((uint8_t *)B + B_size);
		XY = (uint64_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint64_t *)((uint8_t *)XY + XY_size);

	/* Any deviation from classic scrypt pre-hashes the password. */
	if (t || flags) {
		SHA256_CTX ctx;
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, passwd, passwdlen);
		SHA256_Final((uint8_t *)sha256, &ctx);
		passwd = (uint8_t *)sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1,
	    (uint8_t *)B, B_size);

	/* Keep the first 32 bytes of B for the post-hashing step below. */
	if (t || flags)
		blkcpy(sha256, B, sizeof(sha256) / sizeof(sha256[0]));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags,
			    &V[(size_t)16 * r * i * N],
			    NROM, shared,
			    &XY[((size_t)32 * r + 8) * i],
			    S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags, V,
			    NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256(passwd, passwdlen, (uint8_t *)B, B_size, 1, buf, buflen);

	/*
	 * Except when computing classic scrypt, allow all computation so far
	 * to be performed on the client.  The final steps below match those of
	 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
	 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
	 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
	 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey */
		{
			HMAC_SHA256_CTX ctx;
			HMAC_SHA256_Init(&ctx, buf, buflen);
			/* NOTE(review): upstream yescrypt/SCRAM uses the
			 * "Client Key" personalization here; this fork
			 * substitutes a custom 10-byte string.  It is part
			 * of the hash definition -- do not change it. */
			HMAC_SHA256_Update(&ctx, "DashBanana", 10);
			HMAC_SHA256_Final((uint8_t *)sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX ctx;
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, (uint8_t *)sha256, sizeof(sha256));
			SHA256_Final(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
|
Example6.c | //#include <stdio.h>
//#include <omp.h>
//#include <conio.h>
//
//int main(int argc, char *argv[])
//{
// int tid, n = 6;
//#pragma omp parallel if (n > 5) private(tid) shared(n) num_threads(3) // Runs in parallel: the if clause (n > 5) is true
// {
// tid = omp_get_thread_num();
//#pragma omp single
// {
// printf("Value of n = %d\n", n);
// printf("Size of Threads = %d\n", omp_get_num_threads());
// }
// printf("Print statement executed by Thread (%d) \n", tid);
// } /*-- End to parallel segment --*/
//
//
// printf("--------------------------------------------------\n\n");
//
//#pragma omp parallel if (n > 8) private(tid) shared(n) num_threads(3) // Runs serially: the if clause (n > 8) is false, so a single thread executes
// {
// tid = omp_get_thread_num();
//#pragma omp single
// {
// printf("Value of n = %d\n", n);
// printf("Size of Threads = %d\n", omp_get_num_threads());
// }
// printf("Print statement executed by Thread (%d) \n", tid);
// } /*-- End to parallel segment --*/
//
//
// _getch(); // for keep console from <conio.h> library
// return 0;
//} |
elu_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haitao@openailab.com
*/
#include "elu_kernel_arm.h"
#include "neon_mathfun.h"
#include <math.h>
#include <arm_neon.h>
/* Apply the ELU activation to one contiguous chunk of floats:
 *   out = in                        if in > 0
 *   out = alpha * (exp(in) - 1)     if in <= 0
 * `data` points to an int holding the number of elements per chunk and `id`
 * selects which chunk of `input`/`output` to process.  The first parameter
 * is unused; it is kept so the signature matches the task-callback shape
 * used by the callers.
 * Fix: the original shadowed the parameter `i` with both loop indices
 * (a -Wshadow hazard); the loops now use `j` and the unused slot is
 * explicitly voided. */
static void elu_kernel(int i, int id, void* data, const float* input, float* output, float alpha)
{
    (void)i; /* unused: slot required by the callback signature */
    int elem_num = (( int* )data)[0];
    float32x4_t _one = vdupq_n_f32(1.f);
    float32x4_t _zero = vdupq_n_f32(0.f);
    float32x4_t _alpha = vdupq_n_f32(alpha);
    const float* cur_input = input + id * elem_num;
    float* cur_output = output + id * elem_num;
    /* Vectorized main loop: 4 lanes at a time over the largest multiple of 4. */
    for (int j = 0; j < (elem_num & -4); j += 4)
    {
        float32x4_t _p = vld1q_f32(cur_input);
        uint32x4_t _lemask = vcleq_f32(_p, _zero); /* lanes with p <= 0 */
        float32x4_t _nps = exp_ps(_p);             /* exp(p) */
        _nps = vsubq_f32(_nps, _one);              /* exp(p) - 1 */
        _nps = vmulq_f32(_nps, _alpha);            /* alpha * (exp(p) - 1) */
        _p = vbslq_f32(_lemask, _nps, _p);         /* per-lane select */
        vst1q_f32(cur_output, _p);
        cur_input += 4;
        cur_output += 4;
    }
    /* Scalar tail for the remaining (elem_num % 4) elements. */
    for (int j = elem_num & ~3; j < elem_num; j++)
    {
        if (*cur_input < 0.f)
            *cur_output = (exp(*cur_input) - 1.f) * alpha;
        else
            *cur_output = *cur_input;
        cur_input++;
        cur_output++;
    }
}
// Run the ELU operator over a 4D tensor laid out as (N, C, H, W),
// parallelizing across the N*C channel slices with OpenMP.
// Returns 0 on success.
int elu_run(struct tensor* output_tensor, struct tensor* input_tensor, struct elu_param* elu_param,
            int num_thread)
{
    float* in_ptr = ( float* )input_tensor->data;
    float* out_ptr = ( float* )output_tensor->data;
    float alpha = elu_param->alpha;
    // Number of independent slices and the element count of each slice.
    int num_channels = (input_tensor->dims[0]) * (input_tensor->dims[1]);
    int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
#pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < num_channels; c++)
    {
        float* in_slice = in_ptr + c * channel_size;
        float* out_slice = out_ptr + c * channel_size;
        elu_kernel(0, 0, &channel_size, in_slice, out_slice, alpha);
    }
    return 0;
}
|
nmt_master_flat.c | #include "config.h"
#include "utils.h"
// Mask a set of maps (with E/B purification when the field requests it)
// and return their Fourier coefficients in alms_out.
// NOTE: maps_in is modified in place in the non-purified branch.
static void purify_generic_flat(nmt_field_flat *fl,flouble *mask,fcomplex **walm0,flouble **maps_in,fcomplex **alms_out)
{
  int imap;

  //Purified fields get the full purification treatment
  if(fl->pure_b || fl->pure_e) {
    nmt_purify_flat(fl,mask,walm0,maps_in,maps_in,alms_out);
    return;
  }

  //Otherwise just multiply each map by the mask and transform
  for(imap=0;imap<fl->nmaps;imap++)
    fs_map_product(fl->fs,maps_in[imap],mask,maps_in[imap]);
  fs_map2alm(fl->fs,1,fl->spin,maps_in,alms_out);
}
// Deallocate a flat-sky workspace and everything it owns
// (coupling matrices, LU companions, binning and grid info).
void nmt_workspace_flat_free(nmt_workspace_flat *w)
{
  int i;
  int nrows=w->ncls*w->bin->n_bands; //rows of both coupling matrices

  gsl_permutation_free(w->coupling_matrix_perm);
  gsl_matrix_free(w->coupling_matrix_binned_gsl);
  for(i=0;i<nrows;i++) {
    free(w->coupling_matrix_unbinned[i]);
    free(w->coupling_matrix_binned[i]);
  }
  free(w->coupling_matrix_unbinned);
  free(w->coupling_matrix_binned);
  free(w->n_cells);
  nmt_bins_flat_free(w->bin);
  nmt_flatsky_info_free(w->fs);
  free(w);
}
// Allocates and initializes a flat-sky workspace able to hold the
// mode-coupling matrices for ncls power spectra.
//  - fs: flat-sky geometry to copy (nx,ny pixels; lx,ly physical sizes).
//  - bin: bandpower binning scheme (deep-copied, so the caller keeps ownership).
//  - lmn_x,lmx_x / lmn_y,lmx_y: Fourier-mode cut ranges along kx / ky.
//  - is_teb: whether this workspace couples the full T-E-B set.
// All matrices are zero-initialized; the caller fills them and frees the
// result with nmt_workspace_flat_free().
static nmt_workspace_flat *nmt_workspace_flat_new(int ncls,nmt_flatsky_info *fs,
						  nmt_binning_scheme_flat *bin,
						  flouble lmn_x,flouble lmx_x,
						  flouble lmn_y,flouble lmx_y,int is_teb)
{
  int ii,ib=0; //NOTE(review): ib is never used
  nmt_workspace_flat *w=my_malloc(sizeof(nmt_workspace_flat));
  w->is_teb=is_teb;
  w->ncls=ncls;
  //Store the Fourier-mode cuts
  w->ellcut_x[0]=lmn_x;
  w->ellcut_x[1]=lmx_x;
  w->ellcut_y[0]=lmn_y;
  w->ellcut_y[1]=lmx_y;
  //Deep-copy binning and grid info so the workspace owns them
  w->bin=nmt_bins_flat_create(bin->n_bands,bin->ell_0_list,bin->ell_f_list);
  w->lmax=w->bin->ell_f_list[w->bin->n_bands-1];
  w->fs=nmt_flatsky_info_alloc(fs->nx,fs->ny,fs->lx,fs->ly);
  w->n_cells=my_calloc(w->bin->n_bands,sizeof(int));
  //Unbinned MCM: (ncls*n_bands) x (ncls*n_ell), zero-initialized
  w->coupling_matrix_unbinned=my_malloc(w->ncls*w->bin->n_bands*sizeof(flouble *));
  for(ii=0;ii<w->ncls*w->bin->n_bands;ii++)
    w->coupling_matrix_unbinned[ii]=my_calloc(w->ncls*w->fs->n_ell,sizeof(flouble));
  //Binned MCM: (ncls*n_bands) x (ncls*n_bands), plus GSL matrix and
  //permutation used later for its LU decomposition/inversion
  w->coupling_matrix_binned=my_malloc(w->ncls*w->bin->n_bands*sizeof(flouble *));
  for(ii=0;ii<w->ncls*w->bin->n_bands;ii++)
    w->coupling_matrix_binned[ii]=my_calloc(w->ncls*w->bin->n_bands,sizeof(flouble));
  w->coupling_matrix_binned_gsl=gsl_matrix_alloc(w->ncls*w->bin->n_bands,w->ncls*w->bin->n_bands);
  w->coupling_matrix_perm=gsl_permutation_alloc(w->ncls*w->bin->n_bands);
  return w;
}
// Compare two flat-sky grid descriptions.  Returns 0 when both grids have
// the same pixel counts and physical sizes, and 1 otherwise (non-zero
// means "inconsistent", matching the callers' error checks).
static int check_flatsky_infos(nmt_flatsky_info *fs1,nmt_flatsky_info *fs2)
{
  int same=(fs1->nx==fs2->nx) && (fs1->ny==fs2->ny) &&
    (fs1->lx==fs2->lx) && (fs1->ly==fs2->ly);
  return !same;
}
// Computes the flat-sky mode-coupling matrix (MCM) for the cross-power
// spectrum of fields fl1 and fl2, in both its unbinned (per-|k| ring) and
// binned (per-bandpower) forms, and LU-decomposes the binned matrix for
// later inversion in nmt_decouple_cl_l_flat().
//  - bin: bandpower binning scheme.
//  - lmn_x,lmx_x / lmn_y,lmx_y: Fourier modes with kx (ky) inside
//    [lmn_x,lmx_x] ([lmn_y,lmx_y]) are removed from the analysis.
//  - is_teb: if non-zero, builds the joint 7-spectrum T-E-B MCM; in that
//    case fl1 must be spin-0 and fl2 spin!=0.
// Returns a newly allocated workspace; caller frees it with
// nmt_workspace_flat_free().
nmt_workspace_flat *nmt_compute_coupling_matrix_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
						     nmt_binning_scheme_flat *bin,
						     flouble lmn_x,flouble lmx_x,
						     flouble lmn_y,flouble lmx_y,int is_teb)
{
  if(check_flatsky_infos(fl1->fs,fl2->fs))
    report_error(NMT_ERROR_CONSISTENT_RESO,"Can only correlate fields defined on the same pixels!\n");
  //Number of power spectra coupled by this matrix
  int n_cl=fl1->nmaps*fl2->nmaps;
  if(is_teb) {
    if(!((fl1->spin==0) && (fl2->spin!=0)))
      report_error(NMT_ERROR_INCONSISTENT,"For T-E-B MCM the first input field must be spin-0 and the second spin-!=0\n");
    n_cl=7;
  }
  //Effective spins entering the phase factors below
  int spin_1, spin_2;
  if(fl1->spin==0) {
    if(fl2->spin==0) {
      spin_1=0; spin_2=0;
    }
    else {
      spin_1=fl2->spin; spin_2=0;
    }
  }
  else {
    spin_1=fl1->spin;
    spin_2=fl2->spin;
  }
  if(n_cl==7)
    spin_2=spin_1;
  int ii;
  nmt_workspace_flat *w=nmt_workspace_flat_new(n_cl,fl1->fs,bin,
					       lmn_x,lmx_x,lmn_y,lmx_y,is_teb);
  nmt_flatsky_info *fs=fl1->fs;
  //Record the purification settings of both fields
  w->pe1=fl1->pure_e;
  w->pe2=fl2->pure_e;
  w->pb1=fl1->pure_b;
  w->pb2=fl2->pure_b;
  //Fourier transforms of the two masks (shared if the fields coincide)
  fcomplex *cmask1,*cmask2;
  flouble *maskprod,*beamprod;
  flouble *cosarr1,*sinarr1,*cosarr2,*sinarr2,*kmodarr;
  int *i_band,*i_band_nocut,*i_ring;
  cmask1=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
  fs_map2alm(fl1->fs,1,0,&(fl1->mask),&cmask1);
  if(fl1==fl2)
    cmask2=cmask1;
  else {
    cmask2=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
    fs_map2alm(fl2->fs,1,0,&(fl2->mask),&cmask2);
  }
  //Per-Fourier-cell auxiliary arrays filled in the first parallel region
  i_ring=my_malloc(w->fs->npix*sizeof(int));
  i_band=my_malloc(w->fs->npix*sizeof(int));
  maskprod=my_malloc(w->fs->npix*sizeof(flouble));
  i_band_nocut=my_malloc(w->fs->npix*sizeof(int));
  kmodarr=dftw_malloc(w->fs->npix*sizeof(flouble));
  beamprod=dftw_malloc(w->fs->npix*sizeof(flouble));
  if(w->ncls>1) {
    //Phase factors (cos/sin of spin * polar angle) for spin>0 fields
    cosarr1=dftw_malloc(w->fs->npix*sizeof(flouble));
    sinarr1=dftw_malloc(w->fs->npix*sizeof(flouble));
    cosarr2=dftw_malloc(w->fs->npix*sizeof(flouble));
    sinarr2=dftw_malloc(w->fs->npix*sizeof(flouble));
  }
  //Flag the kx/ky values removed by the ell cuts
  int *x_out_range,*y_out_range;
  x_out_range=my_calloc(fs->nx,sizeof(int));
  y_out_range=my_calloc(fs->ny,sizeof(int));
  for(ii=0;ii<fs->nx;ii++) {
    flouble k;
    if(2*ii<=fs->nx) k=ii*2*M_PI/fs->lx;
    else k=-(fs->nx-ii)*2*M_PI/fs->lx;
    if((k<=w->ellcut_x[1]) && (k>=w->ellcut_x[0]))
      x_out_range[ii]=1;
  }
  for(ii=0;ii<fs->ny;ii++) {
    flouble k;
    if(2*ii<=fs->ny) k=ii*2*M_PI/fs->ly;
    else k=-(fs->ny-ii)*2*M_PI/fs->ly;
    if((k<=w->ellcut_y[1]) && (k>=w->ellcut_y[0]))
      y_out_range[ii]=1;
  }
  //First pass over the Fourier plane: precompute, for every cell, the mask
  //cross-power, |k|, beam product, bandpower/ring indices and phase factors.
#pragma omp parallel default(none) \
  shared(fl1,fl2,fs,cmask1,cmask2,w,i_ring,i_band,i_band_nocut) \
  shared(cosarr1,sinarr1,cosarr2,sinarr2,kmodarr,spin_1,spin_2) \
  shared(beamprod,maskprod,x_out_range,y_out_range)
  {
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;
    int iy1,ix1;
    //Per-thread histogram of cells per bandpower, merged in the critical below
    int *n_cells_thr=my_calloc(w->bin->n_bands,sizeof(int));
    gsl_interp_accel *intacc_beam=gsl_interp_accel_alloc();
#pragma omp for
    for(iy1=0;iy1<fs->ny;iy1++) {
      flouble ky;
      int ik=0;
      if(2*iy1<=fs->ny)
	ky=iy1*dky;
      else
	ky=-(fs->ny-iy1)*dky;
      for(ix1=0;ix1<fs->nx;ix1++) {
	flouble kx,kmod,beam1,beam2;
	int ix_here,index_here,index;
	index=ix1+fs->nx*iy1;
	if(2*ix1<=fs->nx) {
	  kx=ix1*dkx;
	  ix_here=ix1;
	}
	else {
	  kx=-(fs->nx-ix1)*dkx;
	  ix_here=fs->nx-ix1;
	}
	//Index into the half-plane (rFFT) mask arrays
	index_here=ix_here+(fs->nx/2+1)*iy1;
	//Re[cmask1 * conj(cmask2)]
	maskprod[index]=(creal(cmask1[index_here])*creal(cmask2[index_here])+
			 cimag(cmask1[index_here])*cimag(cmask2[index_here]));
	kmod=sqrt(kx*kx+ky*ky);
	beam1=nmt_k_function_eval(fl1->beam,kmod,intacc_beam);
	beam2=nmt_k_function_eval(fl2->beam,kmod,intacc_beam);
	kmodarr[index]=kmod;
	beamprod[index]=beam1*beam2;
	ik=nmt_bins_flat_search_fast(w->bin,kmod,ik);
	//Bandpower index after the ell cuts (-1 means "excluded")
	if(y_out_range[iy1] || x_out_range[ix1])
	  i_band[index]=-1;
	else {
	  if(ik>=0) {
	    i_band[index]=ik;
	    n_cells_thr[ik]++;
	  }
	  else
	    i_band[index]=-1;
	}
	//Bandpower index ignoring the ell cuts
	i_band_nocut[index]=ik;
	//Ring (|k| shell) index for the unbinned matrix
	i_ring[index]=(int)(kmod*w->fs->i_dell);
	if((i_ring[index]<0) || (i_ring[index]>=w->fs->n_ell))
	  i_ring[index]=-1;
	if(w->ncls>1) {
	  //cos/sin of spin_1 (and spin_2) times the mode's polar angle,
	  //built by repeated angle addition from (kx,ky)/|k|
	  int spin;
	  flouble c,s,csphi,ssphi;
	  if(kmod>0) {
	    c=kx/kmod;
	    s=ky/kmod;
	  }
	  else {
	    c=1.;
	    s=0.;
	  }
	  spin=0; csphi=1; ssphi=0;
	  while(spin<spin_1) {
	    flouble c2=csphi*c-ssphi*s;
	    flouble s2=ssphi*c+csphi*s;
	    csphi=c2;
	    ssphi=s2;
	    spin++;
	  }
	  cosarr1[index]=csphi;
	  sinarr1[index]=ssphi;
	  if(spin_2 != spin_1) {
	    spin=0; csphi=1; ssphi=0;
	    while(spin<spin_2) {
	      flouble c2=csphi*c-ssphi*s;
	      flouble s2=ssphi*c+csphi*s;
	      csphi=c2;
	      ssphi=s2;
	      spin++;
	    }
	    cosarr2[index]=csphi;
	    sinarr2[index]=ssphi;
	  }
	  else {
	    cosarr2[index]=cosarr1[index];
	    sinarr2[index]=sinarr1[index];
	  }
	}
      }
    } //end omp for
#pragma omp critical
    {
      //Merge the per-thread cell counts
      for(iy1=0;iy1<w->bin->n_bands;iy1++)
	w->n_cells[iy1]+=n_cells_thr[iy1];
    } //end omp critical
    free(n_cells_thr);
    gsl_interp_accel_free(intacc_beam);
  } //end omp parallel
  free(x_out_range);
  free(y_out_range);
  //Second pass: accumulate the coupling matrices.  For every pair of Fourier
  //cells (k1,k2), the contribution maskprod(k1-k2)*beamprod(k2) is added to
  //the matrix element indexed by k1's bandpower and k2's ring/bandpower,
  //with the spin-dependent cos/sin weights of each polarization combination.
#pragma omp parallel default(none) \
  shared(fs,i_ring,i_band,i_band_nocut,w) \
  shared(cosarr1,sinarr1,cosarr2,sinarr2,kmodarr) \
  shared(maskprod,beamprod,spin_1,spin_2)
  {
    int iy1,ix1,ix2,iy2;
    int pe1=w->pe1,pe2=w->pe2,pb1=w->pb1,pb2=w->pb2;
    int pure_any=pe1 || pb1 || pe2 || pb2;
    //Per-thread accumulators, merged in the critical section below
    flouble **coup_unbinned_thr,**coup_binned_thr;
    coup_unbinned_thr=my_malloc(w->bin->n_bands*w->ncls*sizeof(flouble *));
    for(iy1=0;iy1<w->bin->n_bands*w->ncls;iy1++)
      coup_unbinned_thr[iy1]=my_calloc(w->fs->n_ell*w->ncls,sizeof(flouble));
    coup_binned_thr=my_malloc(w->bin->n_bands*w->ncls*sizeof(flouble *));
    for(iy1=0;iy1<w->bin->n_bands*w->ncls;iy1++)
      coup_binned_thr[iy1]=my_calloc(w->bin->n_bands*w->ncls,sizeof(flouble));
#pragma omp for
    for(iy1=0;iy1<fs->ny;iy1++) {
      for(ix1=0;ix1<fs->nx;ix1++) {
	int index1=ix1+fs->nx*iy1;
	int ik1=i_band[index1];
	if(ik1>=0) {
	  flouble inv_k1=0;
	  ik1*=w->ncls;
	  if((index1>0) && (w->ncls>1))
	    inv_k1=1./kmodarr[index1];
	  for(iy2=0;iy2<fs->ny;iy2++) {
	    for(ix2=0;ix2<fs->nx;ix2++) {
	      int index2=ix2+fs->nx*iy2;
	      int ir2=i_ring[index2];
	      int ik2=i_band_nocut[index2];
	      flouble cdiff1=1,sdiff1=0,cdiff2=1,sdiff2=0,kr=1,mp;
	      int index;
	      //(k1-k2), wrapped into the grid, indexes the mask product
	      int iy=iy1-iy2;
	      int ix=ix1-ix2;
	      if(iy<0) iy+=fs->ny;
	      if(ix<0) ix+=fs->nx;
	      ik2*=w->ncls;
	      ir2*=w->ncls;
	      index=ix+fs->nx*iy;
	      if(w->ncls>1) {
		//cos/sin of spin*(phi1-phi2) via angle-difference identities
		cdiff1=cosarr1[index1]*cosarr1[index2]+sinarr1[index1]*sinarr1[index2];
		sdiff1=sinarr1[index1]*cosarr1[index2]-cosarr1[index1]*sinarr1[index2];
		cdiff2=cosarr2[index1]*cosarr2[index2]+sinarr2[index1]*sinarr2[index2];
		sdiff2=sinarr2[index1]*cosarr2[index2]-cosarr2[index1]*sinarr2[index2];
		//(|k2|/|k1|)^2, used by the purification terms
		if((index1==0) && (index2==0))
		  kr=1;
		else
		  kr=kmodarr[index2]*inv_k1;
		kr*=kr;
	      }
	      mp=maskprod[index]*beamprod[index2];
	      if(w->ncls==1) { //spin0-spin0: single TT-like element
		if(ir2>=0)
		  coup_unbinned_thr[ik1+0][ir2+0]+=mp;
		if(ik2>=0)
		  coup_binned_thr[ik1+0][ik2+0]+=mp;
	      }
	      else if(w->ncls==2) { //spin0-spin2: TE,TB block
		//NOTE(review): the local array `fs` shadows the outer
		//nmt_flatsky_info *fs pointer (unused in this scope)
		flouble fc[2],fs[2];
		fc[0]=cdiff1*mp;
		fs[0]=sdiff1*mp;
		if(pure_any) {
		  fc[1]=kr*mp; fs[1]=0;
		}
		if(ir2>=0) {
		  coup_unbinned_thr[ik1+0][ir2+0]+=fc[pe1+pe2]; //TE,TE
		  coup_unbinned_thr[ik1+0][ir2+1]-=fs[pe1+pe2]; //TE,TB
		  coup_unbinned_thr[ik1+1][ir2+0]+=fs[pb1+pb2]; //TB,TE
		  coup_unbinned_thr[ik1+1][ir2+1]+=fc[pb1+pb2]; //TB,TB
		}
		if(ik2>=0) {
		  coup_binned_thr[ik1+0][ik2+0]+=fc[pe1+pe2]; //TE,TE
		  coup_binned_thr[ik1+0][ik2+1]-=fs[pe1+pe2]; //TE,TB
		  coup_binned_thr[ik1+1][ik2+0]+=fs[pb1+pb2]; //TB,TE
		  coup_binned_thr[ik1+1][ik2+1]+=fc[pb1+pb2]; //TB,TB
		}
	      }
	      else if(w->ncls==4) { //spin2-spin2: EE,EB,BE,BB block
		flouble fc1[2],fs1[2];
		flouble fc2[2],fs2[2];
		fc1[0]=cdiff1; fs1[0]=sdiff1;
		fc2[0]=cdiff2; fs2[0]=sdiff2;
		if(pure_any) {
		  //index [1] holds the purified-field weight
		  fc1[1]=kr; fs1[1]=0;
		  fc2[1]=kr; fs2[1]=0;
		}
		if(ir2>=0) {
		  coup_unbinned_thr[ik1+0][ir2+0]+=fc1[pe1]*fc2[pe2]*mp; //EE,EE
		  coup_unbinned_thr[ik1+0][ir2+1]-=fc1[pe1]*fs2[pe2]*mp; //EE,EB
		  coup_unbinned_thr[ik1+0][ir2+2]-=fs1[pe1]*fc2[pe2]*mp; //EE,BE
		  coup_unbinned_thr[ik1+0][ir2+3]+=fs1[pe1]*fs2[pe2]*mp; //EE,BB
		  coup_unbinned_thr[ik1+1][ir2+0]+=fc1[pe1]*fs2[pb2]*mp; //EB,EE
		  coup_unbinned_thr[ik1+1][ir2+1]+=fc1[pe1]*fc2[pb2]*mp; //EB,EB
		  coup_unbinned_thr[ik1+1][ir2+2]-=fs1[pe1]*fs2[pb2]*mp; //EB,BE
		  coup_unbinned_thr[ik1+1][ir2+3]-=fs1[pe1]*fc2[pb2]*mp; //EB,BB
		  coup_unbinned_thr[ik1+2][ir2+0]+=fs1[pb1]*fc2[pe2]*mp; //BE,EE
		  coup_unbinned_thr[ik1+2][ir2+1]-=fs1[pb1]*fs2[pe2]*mp; //BE,EB
		  coup_unbinned_thr[ik1+2][ir2+2]+=fc1[pb1]*fc2[pe2]*mp; //BE,BE
		  coup_unbinned_thr[ik1+2][ir2+3]-=fc1[pb1]*fs2[pe2]*mp; //BE,BB
		  coup_unbinned_thr[ik1+3][ir2+0]+=fs1[pb1]*fs2[pb2]*mp; //BB,EE
		  coup_unbinned_thr[ik1+3][ir2+1]+=fs1[pb1]*fc2[pb2]*mp; //BB,EB
		  coup_unbinned_thr[ik1+3][ir2+2]+=fc1[pb1]*fs2[pb2]*mp; //BB,BE
		  coup_unbinned_thr[ik1+3][ir2+3]+=fc1[pb1]*fc2[pb2]*mp; //BB,BB
		}
		if(ik2>=0) {
		  coup_binned_thr[ik1+0][ik2+0]+=fc1[pe1]*fc2[pe2]*mp; //EE,EE
		  coup_binned_thr[ik1+0][ik2+1]-=fc1[pe1]*fs2[pe2]*mp; //EE,EB
		  coup_binned_thr[ik1+0][ik2+2]-=fs1[pe1]*fc2[pe2]*mp; //EE,BE
		  coup_binned_thr[ik1+0][ik2+3]+=fs1[pe1]*fs2[pe2]*mp; //EE,BB
		  coup_binned_thr[ik1+1][ik2+0]+=fc1[pe1]*fs2[pb2]*mp; //EB,EE
		  coup_binned_thr[ik1+1][ik2+1]+=fc1[pe1]*fc2[pb2]*mp; //EB,EB
		  coup_binned_thr[ik1+1][ik2+2]-=fs1[pe1]*fs2[pb2]*mp; //EB,BE
		  coup_binned_thr[ik1+1][ik2+3]-=fs1[pe1]*fc2[pb2]*mp; //EB,BB
		  coup_binned_thr[ik1+2][ik2+0]+=fs1[pb1]*fc2[pe2]*mp; //BE,EE
		  coup_binned_thr[ik1+2][ik2+1]-=fs1[pb1]*fs2[pe2]*mp; //BE,EB
		  coup_binned_thr[ik1+2][ik2+2]+=fc1[pb1]*fc2[pe2]*mp; //BE,BE
		  coup_binned_thr[ik1+2][ik2+3]-=fc1[pb1]*fs2[pe2]*mp; //BE,BB
		  coup_binned_thr[ik1+3][ik2+0]+=fs1[pb1]*fs2[pb2]*mp; //BB,EE
		  coup_binned_thr[ik1+3][ik2+1]+=fs1[pb1]*fc2[pb2]*mp; //BB,EB
		  coup_binned_thr[ik1+3][ik2+2]+=fc1[pb1]*fs2[pb2]*mp; //BB,BE
		  coup_binned_thr[ik1+3][ik2+3]+=fc1[pb1]*fc2[pb2]*mp; //BB,BB
		}
	      }
	      else if(w->ncls==7) { //joint T-E-B: TT,TE,TB,EE,EB,BE,BB
		//NOTE(review): the purification indices below use pe2/pb2
		//for both factors (the ncls==4 case uses pe1/pb1 for the
		//first factor) — verify this is intended for the TEB case.
		flouble fc1[2],fs1[2];
		flouble fc2[2],fs2[2];
		fc1[0]=cdiff1; fs1[0]=sdiff1;
		fc2[0]=cdiff2; fs2[0]=sdiff2;
		if(pure_any) {
		  fc1[1]=kr; fs1[1]=0;
		  fc2[1]=kr; fs2[1]=0;
		}
		if(ir2>=0) {
		  coup_unbinned_thr[ik1+0][ir2+0]+=mp; //TT,TT
		  coup_unbinned_thr[ik1+1][ir2+1]+=fc1[pe1+pe2]*mp; //TE,TE
		  coup_unbinned_thr[ik1+1][ir2+2]-=fs1[pe1+pe2]*mp; //TE,TB
		  coup_unbinned_thr[ik1+2][ir2+1]+=fs1[pb1+pb2]*mp; //TB,TE
		  coup_unbinned_thr[ik1+2][ir2+2]+=fc1[pb1+pb2]*mp; //TB,TB
		  coup_unbinned_thr[ik1+3][ir2+3]+=fc1[pe2]*fc2[pe2]*mp; //EE,EE
		  coup_unbinned_thr[ik1+3][ir2+4]-=fc1[pe2]*fs2[pe2]*mp; //EE,EB
		  coup_unbinned_thr[ik1+3][ir2+5]-=fs1[pe2]*fc2[pe2]*mp; //EE,BE
		  coup_unbinned_thr[ik1+3][ir2+6]+=fs1[pe2]*fs2[pe2]*mp; //EE,BB
		  coup_unbinned_thr[ik1+4][ir2+3]+=fc1[pe2]*fs2[pb2]*mp; //EB,EE
		  coup_unbinned_thr[ik1+4][ir2+4]+=fc1[pe2]*fc2[pb2]*mp; //EB,EB
		  coup_unbinned_thr[ik1+4][ir2+5]-=fs1[pe2]*fs2[pb2]*mp; //EB,BE
		  coup_unbinned_thr[ik1+4][ir2+6]-=fs1[pe2]*fc2[pb2]*mp; //EB,BB
		  coup_unbinned_thr[ik1+5][ir2+3]+=fs1[pb2]*fc2[pe2]*mp; //BE,EE
		  coup_unbinned_thr[ik1+5][ir2+4]-=fs1[pb2]*fs2[pe2]*mp; //BE,EB
		  coup_unbinned_thr[ik1+5][ir2+5]+=fc1[pb2]*fc2[pe2]*mp; //BE,BE
		  coup_unbinned_thr[ik1+5][ir2+6]-=fc1[pb2]*fs2[pe2]*mp; //BE,BB
		  coup_unbinned_thr[ik1+6][ir2+3]+=fs1[pb2]*fs2[pb2]*mp; //BB,EE
		  coup_unbinned_thr[ik1+6][ir2+4]+=fs1[pb2]*fc2[pb2]*mp; //BB,EB
		  coup_unbinned_thr[ik1+6][ir2+5]+=fc1[pb2]*fs2[pb2]*mp; //BB,BE
		  coup_unbinned_thr[ik1+6][ir2+6]+=fc1[pb2]*fc2[pb2]*mp; //BB,BB
		}
		if(ik2>=0) {
		  coup_binned_thr[ik1+0][ik2+0]+=mp; //TT,TT
		  coup_binned_thr[ik1+1][ik2+1]+=fc1[pe1+pe2]*mp; //TE,TE
		  coup_binned_thr[ik1+1][ik2+2]-=fs1[pe1+pe2]*mp; //TE,TB
		  coup_binned_thr[ik1+2][ik2+1]+=fs1[pb1+pb2]*mp; //TB,TE
		  coup_binned_thr[ik1+2][ik2+2]+=fc1[pb1+pb2]*mp; //TB,TB
		  coup_binned_thr[ik1+3][ik2+3]+=fc1[pe2]*fc2[pe2]*mp; //EE,EE
		  coup_binned_thr[ik1+3][ik2+4]-=fc1[pe2]*fs2[pe2]*mp; //EE,EB
		  coup_binned_thr[ik1+3][ik2+5]-=fs1[pe2]*fc2[pe2]*mp; //EE,BE
		  coup_binned_thr[ik1+3][ik2+6]+=fs1[pe2]*fs2[pe2]*mp; //EE,BB
		  coup_binned_thr[ik1+4][ik2+3]+=fc1[pe2]*fs2[pb2]*mp; //EB,EE
		  coup_binned_thr[ik1+4][ik2+4]+=fc1[pe2]*fc2[pb2]*mp; //EB,EB
		  coup_binned_thr[ik1+4][ik2+5]-=fs1[pe2]*fs2[pb2]*mp; //EB,BE
		  coup_binned_thr[ik1+4][ik2+6]-=fs1[pe2]*fc2[pb2]*mp; //EB,BB
		  coup_binned_thr[ik1+5][ik2+3]+=fs1[pb2]*fc2[pe2]*mp; //BE,EE
		  coup_binned_thr[ik1+5][ik2+4]-=fs1[pb2]*fs2[pe2]*mp; //BE,EB
		  coup_binned_thr[ik1+5][ik2+5]+=fc1[pb2]*fc2[pe2]*mp; //BE,BE
		  coup_binned_thr[ik1+5][ik2+6]-=fc1[pb2]*fs2[pe2]*mp; //BE,BB
		  coup_binned_thr[ik1+6][ik2+3]+=fs1[pb2]*fs2[pb2]*mp; //BB,EE
		  coup_binned_thr[ik1+6][ik2+4]+=fs1[pb2]*fc2[pb2]*mp; //BB,EB
		  coup_binned_thr[ik1+6][ik2+5]+=fc1[pb2]*fs2[pb2]*mp; //BB,BE
		  coup_binned_thr[ik1+6][ik2+6]+=fc1[pb2]*fc2[pb2]*mp; //BB,BB
		}
	      }
	    }
	  }
	}
      }
    } //end omp for
#pragma omp critical
    {
      //Merge per-thread accumulators into the workspace matrices
      for(iy1=0;iy1<w->ncls*w->bin->n_bands;iy1++) {
	for(iy2=0;iy2<w->ncls*w->bin->n_bands;iy2++)
	  w->coupling_matrix_binned[iy1][iy2]+=coup_binned_thr[iy1][iy2];
	for(iy2=0;iy2<w->ncls*w->fs->n_ell;iy2++)
	  w->coupling_matrix_unbinned[iy1][iy2]+=coup_unbinned_thr[iy1][iy2];
      }
    } //end omp critical
    for(iy1=0;iy1<w->bin->n_bands*w->ncls;iy1++) {
      free(coup_unbinned_thr[iy1]);
      free(coup_binned_thr[iy1]);
    }
    free(coup_unbinned_thr);
    free(coup_binned_thr);
  } //end omp parallel
  //Normalize each bandpower row by the pixel-area factor and the number of
  //Fourier cells contributing to that band
#pragma omp parallel default(none) \
  shared(w,fs)
  {
    int il1;
    flouble fac_norm=4*M_PI*M_PI/(fs->lx*fs->lx*fs->ly*fs->ly);
#pragma omp for
    for(il1=0;il1<w->bin->n_bands;il1++) {
      int icl1;
      flouble norm;
      if(w->n_cells[il1]>0)
	norm=fac_norm/w->n_cells[il1];
      else
	norm=0;
      for(icl1=0;icl1<w->ncls;icl1++) {
	int il2;
	for(il2=0;il2<w->fs->n_ell;il2++) {
	  int icl2;
	  for(icl2=0;icl2<w->ncls;icl2++)
	    w->coupling_matrix_unbinned[w->ncls*il1+icl1][w->ncls*il2+icl2]*=norm;
	}
	for(il2=0;il2<w->bin->n_bands;il2++) {
	  int icl2;
	  for(icl2=0;icl2<w->ncls;icl2++)
	    w->coupling_matrix_binned[w->ncls*il1+icl1][w->ncls*il2+icl2]*=norm;
	}
      }
    } //end omp for
  } //end omp parallel
  //Copy the binned matrix into GSL form and LU-decompose it, ready for
  //nmt_decouple_cl_l_flat
  int icl_a,icl_b,ib2,ib3,sig;
  for(icl_a=0;icl_a<w->ncls;icl_a++) {
    for(icl_b=0;icl_b<w->ncls;icl_b++) {
      for(ib2=0;ib2<w->bin->n_bands;ib2++) {
	for(ib3=0;ib3<w->bin->n_bands;ib3++) {
	  gsl_matrix_set(w->coupling_matrix_binned_gsl,w->ncls*ib2+icl_a,w->ncls*ib3+icl_b,
			 w->coupling_matrix_binned[w->ncls*ib2+icl_a][w->ncls*ib3+icl_b]);
	}
      }
    }
  }
  gsl_linalg_LU_decomp(w->coupling_matrix_binned_gsl,w->coupling_matrix_perm,&sig);
  //Free all temporaries
  dftw_free(cmask1);
  if(fl1!=fl2)
    dftw_free(cmask2);
  free(i_ring);
  free(i_band);
  free(i_band_nocut);
  dftw_free(kmodarr);
  dftw_free(beamprod);
  free(maskprod);
  if(w->ncls>1) {
    dftw_free(cosarr1);
    dftw_free(sinarr1);
    dftw_free(cosarr2);
    dftw_free(sinarr2);
  }
  return w;
}
// Computes the deprojection (contaminant-cleaning) bias on the coupled
// pseudo-Cl of two flat-sky fields, given proposal power spectra
// cl_proposal sampled at nl_prop multipoles l_prop.  The result (nmaps1*nmaps2
// spectra, one value per bandpower of bin) is accumulated into cl_bias,
// which is zeroed on entry.  Three terms are computed: one per set of
// templates of each field, plus the cross term when both fields have
// templates.  lmn_x,lmx_x / lmn_y,lmx_y are the Fourier-mode cuts.
void nmt_compute_deprojection_bias_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
					nmt_binning_scheme_flat *bin,
					flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y,
					int nl_prop,flouble *l_prop,flouble **cl_proposal,
					flouble **cl_bias)
{
  if(fl1->lite || fl2->lite)
    report_error(NMT_ERROR_LITE,"No deprojection bias for lightweight fields!\n");
  //Placeholder
  int ii;
  long ip;
  int nspec=fl1->nmaps*fl2->nmaps;
  //Scratch spectra and interpolable versions of the proposal spectra
  flouble **cl_dum=my_malloc(nspec*sizeof(flouble *));
  nmt_k_function **cl_proposal_f=my_malloc(nspec*sizeof(nmt_k_function *));
  for(ii=0;ii<nspec;ii++) {
    cl_dum[ii]=my_calloc(bin->n_bands,sizeof(flouble));
    cl_proposal_f[ii]=nmt_k_function_alloc(nl_prop,l_prop,cl_proposal[ii],cl_proposal[ii][0],0,0);
    for(ip=0;ip<bin->n_bands;ip++)
      cl_bias[ii][ip]=0;
  }
  if(check_flatsky_infos(fl1->fs,fl2->fs))
    report_error(NMT_ERROR_CONSISTENT_RESO,"Can only correlate fields defined on the same pixels!\n");
  //TODO: some terms (e.g. C^ab*SHT[w*g^j]) could be precomputed
  //TODO: if fl1=fl2 F2=F3
  //Allocate dummy maps and alms
  flouble **map_1_dum=my_malloc(fl1->nmaps*sizeof(flouble *));
  fcomplex **alm_1_dum=my_malloc(fl1->nmaps*sizeof(fcomplex *));
  for(ii=0;ii<fl1->nmaps;ii++) {
    map_1_dum[ii]=dftw_malloc(fl1->npix*sizeof(flouble));
    alm_1_dum[ii]=dftw_malloc(fl1->fs->ny*(fl1->fs->nx/2+1)*sizeof(fcomplex));
  }
  flouble **map_2_dum=my_malloc(fl2->nmaps*sizeof(flouble *));
  fcomplex **alm_2_dum=my_malloc(fl2->nmaps*sizeof(fcomplex *));
  for(ii=0;ii<fl2->nmaps;ii++) {
    map_2_dum[ii]=dftw_malloc(fl2->npix*sizeof(flouble));
    alm_2_dum[ii]=dftw_malloc(fl2->fs->ny*(fl2->fs->nx/2+1)*sizeof(fcomplex));
  }
  //Term 1: contribution from the templates of field 2
  if(fl2->ntemp>0) {
    int iti;
    for(iti=0;iti<fl2->ntemp;iti++) {
      int itj;
      for(itj=0;itj<fl2->ntemp;itj++) {
	int im1,im2;
	//N_ij: inverse template covariance element of field 2
	double nij=gsl_matrix_get(fl2->matrix_M,iti,itj);
	//w*g^j
	for(im2=0;im2<fl2->nmaps;im2++)
	  fs_map_product(fl2->fs,fl2->temp[itj][im2],fl2->mask,map_2_dum[im2]);
	//DFT[w*g^j]
	fs_map2alm(fl2->fs,1,fl2->spin,map_2_dum,alm_2_dum);
	//C^ab*DFT[w*g^j]
	for(im1=0;im1<fl1->nmaps;im1++) {
	  fs_zero_alm(fl1->fs,alm_1_dum[im1]);
	  for(im2=0;im2<fl2->nmaps;im2++)
	    fs_alter_alm(fl2->fs,-1.,alm_2_dum[im2],alm_1_dum[im1],cl_proposal_f[im1*fl2->nmaps+im2],1);
	}
	//DFT^-1[C^ab*DFT[w*g^j]]
	fs_alm2map(fl1->fs,1,fl1->spin,map_1_dum,alm_1_dum);
	//DFT[v*DFT^-1[C^ab*DFT[w*g^j]]]
	purify_generic_flat(fl1,fl1->mask,fl1->a_mask,map_1_dum,alm_1_dum);
	//Sum_m(DFT[v*DFT^-1[C^ab*DFT[w*g^j]]]*g^i*)/(2l+1)
	fs_alm2cl(fl1->fs,bin,alm_1_dum,fl2->a_temp[iti],fl1->spin,fl2->spin,cl_dum,
		  lmn_x,lmx_x,lmn_y,lmx_y);
	//Subtract this term's contribution
	for(im1=0;im1<nspec;im1++) {
	  for(ip=0;ip<bin->n_bands;ip++)
	    cl_bias[im1][ip]-=cl_dum[im1][ip]*nij;
	}
      }
    }
  }
  //Term 2: contribution from the templates of field 1 (mirror of term 1)
  if(fl1->ntemp>0) {
    int iti;
    for(iti=0;iti<fl1->ntemp;iti++) {
      int itj;
      for(itj=0;itj<fl1->ntemp;itj++) {
	int im1,im2;
	double mij=gsl_matrix_get(fl1->matrix_M,iti,itj);
	//v*f^j
	for(im1=0;im1<fl1->nmaps;im1++)
	  fs_map_product(fl1->fs,fl1->temp[itj][im1],fl1->mask,map_1_dum[im1]);
	//DFT[v*f^j]
	fs_map2alm(fl1->fs,1,fl1->spin,map_1_dum,alm_1_dum);
	//C^abT*DFT[v*f^j]
	for(im2=0;im2<fl2->nmaps;im2++) {
	  fs_zero_alm(fl2->fs,alm_2_dum[im2]);
	  for(im1=0;im1<fl1->nmaps;im1++)
	    fs_alter_alm(fl1->fs,-1.,alm_1_dum[im1],alm_2_dum[im2],cl_proposal_f[im1*fl2->nmaps+im2],1);
	}
	//DFT^-1[C^abT*DFT[v*f^j]]
	fs_alm2map(fl2->fs,1,fl2->spin,map_2_dum,alm_2_dum);
	//DFT[w*DFT^-1[C^abT*DFT[v*f^j]]]
	purify_generic_flat(fl2,fl2->mask,fl2->a_mask,map_2_dum,alm_2_dum);
	//Sum_m(f^i*DFT[w*DFT^-1[C^abT*DFT[v*f^j]]]^*)/(2l+1)
	fs_alm2cl(fl1->fs,bin,fl1->a_temp[iti],alm_2_dum,fl1->spin,fl2->spin,cl_dum,
		  lmn_x,lmx_x,lmn_y,lmx_y);
	for(im1=0;im1<nspec;im1++) {
	  for(ip=0;ip<bin->n_bands;ip++)
	    cl_bias[im1][ip]-=cl_dum[im1][ip]*mij;
	}
      }
    }
  }
  //Term 3: cross term, present only when both fields have templates
  if((fl1->ntemp>0) && (fl2->ntemp>0)) {
    int iti,itj,itp,itq,im1,im2;
    //mat_prod[j,q] = Int[f^jT*v*DFT^-1[C^ab*DFT[w*g^q]]]
    flouble *mat_prod=my_calloc(fl1->ntemp*fl2->ntemp,sizeof(flouble));
    for(itj=0;itj<fl1->ntemp;itj++) {
      for(itq=0;itq<fl2->ntemp;itq++) {
	//w*g^q
	for(im2=0;im2<fl2->nmaps;im2++)
	  fs_map_product(fl2->fs,fl2->temp[itq][im2],fl2->mask,map_2_dum[im2]);
	//DFT[w*g^q]
	fs_map2alm(fl2->fs,1,fl2->spin,map_2_dum,alm_2_dum);
	//C^ab*DFT[w*g^q]
	for(im1=0;im1<fl1->nmaps;im1++) {
	  fs_zero_alm(fl1->fs,alm_1_dum[im1]);
	  for(im2=0;im2<fl2->nmaps;im2++)
	    fs_alter_alm(fl2->fs,-1.,alm_2_dum[im2],alm_1_dum[im1],cl_proposal_f[im1*fl2->nmaps+im2],1);
	}
	//DFT^-1[C^ab*DFT[w*g^q]]
	fs_alm2map(fl1->fs,1,fl1->spin,map_1_dum,alm_1_dum);
	for(im1=0;im1<fl1->nmaps;im1++) {
	  //v*DFT^-1[C^ab*DFT[w*g^q]]
	  fs_map_product(fl1->fs,map_1_dum[im1],fl1->mask,map_1_dum[im1]);
	  //Int[f^jT*v*DFT^-1[C^ab*DFT[w*g^q]]]
	  mat_prod[itj*fl2->ntemp+itq]+=fs_map_dot(fl1->fs,map_1_dum[im1],fl1->temp[itj][im1]);
	}
      }
    }
    for(iti=0;iti<fl1->ntemp;iti++) {
      for(itp=0;itp<fl2->ntemp;itp++) {
	//Sum_m(f^i*g^p*)/(2l+1)
	fs_alm2cl(fl1->fs,bin,fl1->a_temp[iti],fl2->a_temp[itp],fl1->spin,fl2->spin,cl_dum,
		  lmn_x,lmx_x,lmn_y,lmx_y);
	for(itj=0;itj<fl1->ntemp;itj++) {
	  double mij=gsl_matrix_get(fl1->matrix_M,iti,itj);
	  for(itq=0;itq<fl2->ntemp;itq++) {
	    double npq=gsl_matrix_get(fl2->matrix_M,itp,itq);
	    //Cross term enters with positive sign
	    for(im1=0;im1<nspec;im1++) {
	      for(ip=0;ip<bin->n_bands;ip++)
		cl_bias[im1][ip]+=cl_dum[im1][ip]*mat_prod[itj*fl2->ntemp+itq]*mij*npq;
	    }
	  }
	}
      }
    }
    free(mat_prod);
  }
  //Free all scratch buffers
  for(ii=0;ii<fl1->nmaps;ii++) {
    dftw_free(map_1_dum[ii]);
    dftw_free(alm_1_dum[ii]);
  }
  free(map_1_dum);
  free(alm_1_dum);
  for(ii=0;ii<fl2->nmaps;ii++) {
    dftw_free(map_2_dum[ii]);
    dftw_free(alm_2_dum[ii]);
  }
  free(map_2_dum);
  free(alm_2_dum);
  for(ii=0;ii<nspec;ii++) {
    free(cl_dum[ii]);
    nmt_k_function_free(cl_proposal_f[ii]);
  }
  free(cl_proposal_f);
  free(cl_dum);
  return;
}
// Convolves input power spectra with the *unbinned* mode-coupling matrix to
// produce coupled bandpowers.
//  - nl, larr, cl_in: input spectra (w->ncls of them) sampled at nl multipoles.
//  - cl_out: output, w->ncls arrays of w->bin->n_bands coupled bandpowers.
// The input spectra are first interpolated onto the Fourier grid and averaged
// over |k| rings before being multiplied by the coupling matrix.
void nmt_couple_cl_l_flat_fast(nmt_workspace_flat *w,int nl,flouble *larr,flouble **cl_in,flouble **cl_out)
{
  int ii;
  //Ring-averaged input spectra and per-ring cell counts
  flouble *cl_in_rings=my_calloc(w->ncls*w->fs->n_ell,sizeof(flouble));
  int *n_cells=my_calloc(w->fs->n_ell,sizeof(int));
  //Interpolable versions of the input spectra
  nmt_k_function **fcl=my_malloc(w->ncls*sizeof(nmt_k_function *));
  for(ii=0;ii<w->ncls;ii++)
    fcl[ii]=nmt_k_function_alloc(nl,larr,cl_in[ii],cl_in[ii][0],0.,0);
  //Interpolate input power spectrum onto grid and bin into rings
#pragma omp parallel default(none) \
  shared(w,fcl,cl_in_rings,n_cells)
  {
    int iy1,ix1;
    flouble dkx=2*M_PI/w->fs->lx;
    flouble dky=2*M_PI/w->fs->ly;
    //Per-thread accumulators, merged in the critical section below
    flouble *cl_in_rings_thr=my_calloc(w->ncls*w->fs->n_ell,sizeof(flouble));
    int *n_cells_thr=my_calloc(w->fs->n_ell,sizeof(int));
    gsl_interp_accel *intacc=gsl_interp_accel_alloc();
#pragma omp for
    for(iy1=0;iy1<w->fs->ny;iy1++) {
      flouble ky;
      if(2*iy1<=w->fs->ny)
	ky=iy1*dky;
      else
	ky=-(w->fs->ny-iy1)*dky;
      for(ix1=0;ix1<w->fs->nx;ix1++) {
	flouble kx,kmod;
	int ir;
	if(2*ix1<=w->fs->nx)
	  kx=ix1*dkx;
	else
	  kx=-(w->fs->nx-ix1)*dkx;
	kmod=sqrt(kx*kx+ky*ky);
	//Ring index of this Fourier cell
	ir=(int)(kmod*w->fs->i_dell);
	if(ir<w->fs->n_ell) {
	  int ic,ind0=ir*w->ncls;
	  n_cells_thr[ir]++;
	  for(ic=0;ic<w->ncls;ic++)
	    cl_in_rings_thr[ind0+ic]+=nmt_k_function_eval(fcl[ic],kmod,intacc);
	}
      }
    } //end omp for
#pragma omp critical
    {
      for(iy1=0;iy1<w->fs->n_ell;iy1++)
	n_cells[iy1]+=n_cells_thr[iy1];
      for(iy1=0;iy1<w->fs->n_ell*w->ncls;iy1++)
	cl_in_rings[iy1]+=cl_in_rings_thr[iy1];
    } //end omp critical
    free(cl_in_rings_thr);
    free(n_cells_thr);
    gsl_interp_accel_free(intacc);
  } //end omp parallel
  //Average over the cells contributing to each ring
  for(ii=0;ii<w->fs->n_ell;ii++) {
    int ic;
    for(ic=0;ic<w->ncls;ic++) {
      if(n_cells[ii]>0)
	cl_in_rings[ii*w->ncls+ic]/=n_cells[ii];
    }
  }
  //Convolve with mode-coupling matrix
  for(ii=0;ii<w->ncls;ii++) {
    int i1;
    for(i1=0;i1<w->bin->n_bands;i1++) {
      int ind2,ind1=i1*w->ncls+ii;
      cl_out[ii][i1]=0;
      for(ind2=0;ind2<w->ncls*w->fs->n_ell;ind2++)
	cl_out[ii][i1]+=w->coupling_matrix_unbinned[ind1][ind2]*cl_in_rings[ind2];
    }
  }
  //Free up
  free(cl_in_rings);
  free(n_cells);
  for(ii=0;ii<w->ncls;ii++)
    nmt_k_function_free(fcl[ii]);
  free(fcl);
}
// Like nmt_couple_cl_l_flat_fast, but convolves with the *binned*
// mode-coupling matrix: the input spectra are interpolated onto the
// Fourier grid (honoring the workspace's ell cuts), averaged directly into
// bandpowers, and then multiplied by the binned MCM.
void nmt_couple_cl_l_flat_quick(nmt_workspace_flat *w,int nl,flouble *larr,flouble **cl_in,flouble **cl_out)
{
  int ii;
  //Bandpower-averaged input spectra
  flouble **cell_in=my_malloc(w->ncls*sizeof(flouble *));
  gsl_interp_accel *intacc=gsl_interp_accel_alloc();
  for(ii=0;ii<w->ncls;ii++) {
    nmt_k_function *fcl=nmt_k_function_alloc(nl,larr,cl_in[ii],cl_in[ii][0],0.,0);
    cell_in[ii]=my_calloc(w->bin->n_bands,sizeof(flouble));
    int iy;
    flouble dkx=2*M_PI/w->fs->lx;
    flouble dky=2*M_PI/w->fs->ly;
    for(iy=0;iy<w->fs->ny;iy++) {
      flouble ky;
      int ik=0;
      if(2*iy<=w->fs->ny)
	ky=iy*dky;
      else
	ky=-(w->fs->ny-iy)*dky;
      //Skip ky modes removed by the ell cuts
      if((ky>w->ellcut_y[1]) || (ky<w->ellcut_y[0])) {
	int ix;
	for(ix=0;ix<w->fs->nx;ix++) {
	  flouble kx;
	  if(2*ix<=w->fs->nx)
	    kx=ix*dkx;
	  else
	    kx=-(w->fs->nx-ix)*dkx;
	  //Skip kx modes removed by the ell cuts
	  if((kx>w->ellcut_x[1]) || (kx<w->ellcut_x[0])) {
	    double kmod=sqrt(kx*kx+ky*ky);
	    ik=nmt_bins_flat_search_fast(w->bin,kmod,ik);
	    if(ik>=0)
	      cell_in[ii][ik]+=nmt_k_function_eval(fcl,kmod,intacc);
	  }
	}
      }
    }
    //Average over the cells contributing to each bandpower
    for(iy=0;iy<w->bin->n_bands;iy++) {
      if(w->n_cells[iy]>0)
	cell_in[ii][iy]/=w->n_cells[iy];
      else
	cell_in[ii][iy]=0;
    }
    nmt_k_function_free(fcl);
  }
  gsl_interp_accel_free(intacc);
  //Multiply by the binned mode-coupling matrix
  int icl1;
  for(icl1=0;icl1<w->ncls;icl1++) {
    int i1;
    for(i1=0;i1<w->bin->n_bands;i1++) {
      int icl2;
      int ind1=i1*w->ncls+icl1;
      cl_out[icl1][i1]=0;
      for(icl2=0;icl2<w->ncls;icl2++) {
	int i2;
	for(i2=0;i2<w->bin->n_bands;i2++) {
	  int ind2=i2*w->ncls+icl2;
	  cl_out[icl1][i1]+=w->coupling_matrix_binned[ind1][ind2]*cell_in[icl2][i2];
	}
      }
    }
  }
  for(ii=0;ii<w->ncls;ii++)
    free(cell_in[ii]);
  free(cell_in);
}
// Solves for the decoupled bandpowers:
//   cl_out = M^-1 * (cl_in - cl_noise_in - cl_bias),
// where M is the binned mode-coupling matrix whose LU decomposition was
// precomputed in the workspace.  All spectra have w->ncls components of
// w->bin->n_bands bandpowers each.
void nmt_decouple_cl_l_flat(nmt_workspace_flat *w,flouble **cl_in,flouble **cl_noise_in,
			    flouble **cl_bias,flouble **cl_out)
{
  int ic,ib;
  int nrows=w->ncls*w->bin->n_bands;
  gsl_vector *vec_coupled=gsl_vector_alloc(nrows);
  gsl_vector *vec_decoupled=gsl_vector_alloc(nrows);

  //Subtract noise and deprojection bias, packing into a GSL vector
  for(ic=0;ic<w->ncls;ic++) {
    for(ib=0;ib<w->bin->n_bands;ib++) {
      flouble d=cl_in[ic][ib]-cl_noise_in[ic][ib]-cl_bias[ic][ib];
      gsl_vector_set(vec_coupled,w->ncls*ib+ic,d);
    }
  }

  //Solve M x = d using the precomputed LU decomposition
  gsl_linalg_LU_solve(w->coupling_matrix_binned_gsl,w->coupling_matrix_perm,
		      vec_coupled,vec_decoupled);

  //Unpack the solution into cl_out
  for(ic=0;ic<w->ncls;ic++) {
    for(ib=0;ib<w->bin->n_bands;ib++)
      cl_out[ic][ib]=gsl_vector_get(vec_decoupled,w->ncls*ib+ic);
  }

  gsl_vector_free(vec_coupled);
  gsl_vector_free(vec_decoupled);
}
// Computes the (mask-)coupled cross-pseudo-Cl of two fields from their stored
// Fourier coefficients, binned into the bandpowers of bin and written to
// cl_out (fl1->nmaps*fl2->nmaps spectra).  lmn_x,lmx_x / lmn_y,lmx_y are the
// Fourier-mode cuts along kx / ky.  Errors out for mapless fields or fields
// defined on different pixelizations.
void nmt_compute_coupled_cell_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
				   nmt_binning_scheme_flat *bin,flouble **cl_out,
				   flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y)
{
  if(fl1->mask_only || fl2->mask_only)
    report_error(NMT_ERROR_LITE,"Can't correlate mapless fields!\n");
  if(check_flatsky_infos(fl1->fs,fl2->fs))
    report_error(NMT_ERROR_CONSISTENT_RESO,"Can only correlate fields defined on the same pixels!\n");
  fs_alm2cl(fl1->fs,bin,fl1->alms,fl2->alms,fl1->spin,fl2->spin,cl_out,lmn_x,lmx_x,lmn_y,lmx_y);
}
// Convenience driver: computes the decoupled cross-power spectrum of two
// flat-sky fields in one call.
//  - w0: optional precomputed workspace; if NULL a new one is computed
//    (and returned — the caller owns it either way and must free it).
//  - cl_noise: coupled noise bias to subtract.
//  - nl_prop,l_prop,cl_prop: proposal spectra for the deprojection bias.
//  - cl_out: decoupled bandpowers.
// Steps: coupled pseudo-Cl -> deprojection bias -> decoupling.
nmt_workspace_flat *nmt_compute_power_spectra_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
						   nmt_binning_scheme_flat *bin,
						   flouble lmn_x,flouble lmx_x,
						   flouble lmn_y,flouble lmx_y,
						   nmt_workspace_flat *w0,flouble **cl_noise,
						   int nl_prop,flouble *l_prop,flouble **cl_prop,
						   flouble **cl_out)
{
  int ii;
  flouble **cl_bias,**cl_data;
  nmt_workspace_flat *w;
  if(w0==NULL)
    w=nmt_compute_coupling_matrix_flat(fl1,fl2,bin,lmn_x,lmx_x,lmn_y,lmx_y,0);
  else {
    //Reuse the provided workspace after checking it is compatible
    w=w0;
    if((check_flatsky_infos(fl1->fs,w->fs)) || (check_flatsky_infos(fl2->fs,w->fs)))
      report_error(NMT_ERROR_CONSISTENT_RESO,"Input workspace has different pixels!\n");
    if(bin->n_bands!=w->bin->n_bands)
      report_error(NMT_ERROR_CONSISTENT_RESO,"Input workspace has different bandpowers!\n");
  }
  //Scratch spectra, zero-initialized
  cl_bias=my_malloc(w->ncls*sizeof(flouble *));
  cl_data=my_malloc(w->ncls*sizeof(flouble *));
  for(ii=0;ii<w->ncls;ii++) {
    cl_bias[ii]=my_calloc(w->bin->n_bands,sizeof(flouble));
    cl_data[ii]=my_calloc(w->bin->n_bands,sizeof(flouble));
  }
  //Coupled pseudo-Cl, deprojection bias, then decoupling
  nmt_compute_coupled_cell_flat(fl1,fl2,bin,cl_data,lmn_x,lmx_x,lmn_y,lmx_y);
  nmt_compute_deprojection_bias_flat(fl1,fl2,bin,lmn_x,lmx_x,lmn_y,lmx_y,
				     nl_prop,l_prop,cl_prop,cl_bias);
  nmt_decouple_cl_l_flat(w,cl_data,cl_noise,cl_bias,cl_out);
  for(ii=0;ii<w->ncls;ii++) {
    free(cl_bias[ii]);
    free(cl_data[ii]);
  }
  free(cl_bias);
  free(cl_data);
  return w;
}
|
GB_binop__le_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__le_fp32
// A.*B function (eWiseMult): GB_AemultB__le_fp32
// A*D function (colscale): GB_AxD__le_fp32
// D*A function (rowscale): GB_DxB__le_fp32
// C+=B function (dense accum): GB_Cdense_accumB__le_fp32
// C+=b function (dense accum): GB_Cdense_accumb__le_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__le_fp32
// C=scalar+B GB_bind1st__le_fp32
// C=scalar+B' GB_bind1st_tran__le_fp32
// C=A+scalar GB_bind2nd__le_fp32
// C=A'+scalar GB_bind2nd_tran__le_fp32
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_FP32 || GxB_NO_LE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; no accumulator.
// Returns GrB_NO_VALUE when this kernel is disabled via GB_control.h.
GrB_Info GB_Cdense_ewise3_noaccum__le_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template uses GB_BINOP/GB_GETA/GB_GETB defined above
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, sparse B accumulated into dense C.
// The subassign template is compiled out (#if 0) for this operator because
// LE is not a valid accumulator type combination here; the function is kept
// so the generated dispatch table stays uniform.
GrB_Info GB_Cdense_accumB__le_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, scalar b accumulated into dense C.
// As with GB_Cdense_accumB above, the body is compiled out (#if 0) for this
// operator; the stub returns GrB_SUCCESS to keep the dispatch table uniform.
GrB_Info GB_Cdense_accumb__le_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by diagonal matrix D: Cx [p] = Ax [p] <= D(j,j).
GrB_Info GB_AxD__le_fp32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's values; C has the same pattern as A
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by diagonal matrix D: Cx [p] = D(i,i) <= Bx [p].
GrB_Info GB_DxB__le_fp32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's values; C has the same pattern as B
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, pattern is the set union of A and B.
// The slicing (TaskList/ntasks) was computed by the caller.
GrB_Info GB_AaddB__le_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, pattern is the set intersection.
GrB_Info GB_AemultB__le_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for all p: apply the operator with the scalar
// bound to the first argument. Aliasing Cx == Bx is permitted since each
// entry is read once before its (same-index) output is written.
GrB_Info GB_bind1st__le_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped in/out arrays
    bool *Cx = (bool *) Cx_output ;
    const float *Bx = (const float *) Bx_input ;
    float x = (*((float *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (x <= Bx [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for all p: apply the operator with the scalar
// bound to the second argument. Aliasing Cx == Ax is permitted.
GrB_Info GB_bind2nd__le_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped in/out arrays
    bool *Cx = (bool *) Cx_output ;
    const float *Ax = (const float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (Ax [k] <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x <= aij) via GB_CAST_OP.
GrB_Info GB_bind1st_tran__le_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (no effect at run time)
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij <= y) via GB_CAST_OP.
GrB_Info GB_bind2nd_tran__le_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sapH_fmt_plug.c | /*
* this is a SAP-H plugin for john the ripper.
* Copyright (c) 2014 JimF, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*
* The internals of this algorithm were found on the HashCat forum, and
* implemented here, whether, it is right or wrong. A link to that post is:
* http://hashcat.net/forum/thread-3804.html
* There are some things which are unclear, BUT which have been coded as listed
* within that post. Things such as the signatures themselves are somewhat
* unclear, and do not follow patterns well. The sha1 signature is lower case
* and does not contain the 1. The other signatures are upper case. This code
* was implemented in the exact manner as described on the forum, and will be
* used as such, until we find out that it is right or wrong (i.e. we get sample
* hashs from a REAL system in the other formats). If things are not correct,
* getting this format corrected will be trivial.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sapH;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sapH);
#else
#include <string.h>
#include <ctype.h>
#include "arch.h"
/* for now, undef this until I get OMP working, then start on SIMD */
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA1
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA256
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "sha.h"
#include "sha2.h"
#include "johnswap.h"
#if defined(_OPENMP)
#include <omp.h>
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#endif
/*
* Assumption is made that SIMD_COEF_32*SIMD_PARA_SHA1 is >= than
* SHA256_COEF*PARA and SHA512_COEF*PARA, and that these other 2
* will evenly divide the SIMD_COEF_32*SHA1_SSRE_PARA value.
* Works with current code. BUT if SIMD_PARA_SHA1 was 3 and
* SIMD_PARA_SHA256 was 2, then we would have problems.
*/
#ifdef SIMD_COEF_32
#define NBKEYS1 (SIMD_COEF_32 * SIMD_PARA_SHA1)
#else
#define NBKEYS1 1
#endif
#ifdef SIMD_COEF_32
#define NBKEYS256 (SIMD_COEF_32 * SIMD_PARA_SHA256)
#else
#define NBKEYS256 1
#endif
#ifdef SIMD_COEF_64
#define NBKEYS512 (SIMD_COEF_64 * SIMD_PARA_SHA512)
#else
#define NBKEYS512 1
#endif
// the least common multiple of the NBKEYS* above
#define NBKEYS (SIMD_COEF_32*SIMD_PARA_SHA1*SIMD_PARA_SHA256*SIMD_PARA_SHA512)
#include "simd-intrinsics.h"
#define FORMAT_LABEL "saph"
#define FORMAT_NAME "SAP CODVN H (PWDSALTEDHASH)"
#define FORMAT_TAG "{x-issha, "
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAG256 "{x-isSHA256, "
#define FORMAT_TAG256_LEN (sizeof(FORMAT_TAG256)-1)
#define FORMAT_TAG384 "{x-isSHA384, "
#define FORMAT_TAG384_LEN (sizeof(FORMAT_TAG384)-1)
#define FORMAT_TAG512 "{x-isSHA512, "
#define FORMAT_TAG512_LEN (sizeof(FORMAT_TAG512)-1)
#define ALGORITHM_NAME "SHA-1/SHA-2 " SHA1_ALGORITHM_NAME
#include "memdbg.h"
#define BENCHMARK_COMMENT " (SHA1x1024)"
#define BENCHMARK_LENGTH 0
#define SALT_LENGTH 16 /* the max used sized salt */
#define CIPHERTEXT_LENGTH 132 /* max salt+sha512 + 2^32 iterations */
#define BINARY_SIZE 16 /* we cut off all hashes down to 16 bytes */
#define MAX_BINARY_SIZE 64 /* sha512 is 64 byte */
#define SHA1_BINARY_SIZE 20
#define SHA256_BINARY_SIZE 32
#define SHA384_BINARY_SIZE 48
#define SHA512_BINARY_SIZE 64
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct sapH_salt)
#define SALT_ALIGN 4
/* NOTE, format is slow enough that endianity conversion is pointless. Just use flat buffers. */
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define PLAINTEXT_LENGTH 23
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define PLAINTEXT_LENGTH 125
#endif
static struct fmt_tests tests[] = {
/* first 2 hashes are 'default' 1024 iteration with 12 bytes salt so */
/* timings reflect that, and benchmark comment set to (sha1, 1024) */
{"{x-issha, 1024}hmiyJ2a/Z+HRpjQ37Osz+rYax9UxMjM0NTY3ODkwYWI=","OpenWall"},
{"{x-issha, 1024}fRLe9EvN/Le81BDEDZR5SEC0O6BhYmNkZWZnaHVrYWw=","JohnTheRipper"},
{"{x-issha, 1024}L1PHSP1vOwdYh0ASjswI69fQQQhzQXFlWmxnaFA5","booboo"},
{"{x-issha, 1024}dCjaHQ47/WeSwsoSYDR/8puLby5T","booboo"}, /* 1 byte salt */
{"{x-issha, 1024}+q+WSxWXJt7SjV5VJEymEKPUbn1FQWM=","HYulafeE!3"},
{"{x-issha, 6666}7qNFlIR+ZQUpe2DtSBvpvzU5VlBzcG1DVGxvOEFQODI=","dif_iterations"},
{"{x-isSHA256, 3000}UqMnsr5BYN+uornWC7yhGa/Wj0u5tshX19mDUQSlgih6OTFoZjRpMQ==","booboo"},
{"{x-isSHA256, 3000}ydi0JlyU6lX5305Qk/Q3uLBbIFjWuTyGo3tPBZDcGFd6NkFvV1gza3RkNg==","GottaGoWhereNeeded"},
{"{x-isSHA384, 5000}3O/F4YGKNmIYHDu7ZQ7Q+ioCOQi4HRY4yrggKptAU9DtmHigCuGqBiAPVbKbEAfGTzh4YlZLWUM=","booboo"},
{"{x-isSHA384, 5000}XSLo2AKIvACwqW/X416UeVbHOXmio4u27Z7cgXS2rxND+zTpN+x3JNfQcEQX2PT0Z3FPdEY2dHM=","yiPP3rs"},
{"{x-isSHA512, 7500}ctlX6qYsWspafEzwoej6nFp7zRQQjr8y22vE+xeveIX2gUndAw9N2Gep5azNUwuxOe2o7tusF800OfB9tg4taWI4Tg==","booboo"},
{"{x-isSHA512, 7500}Qyrh2JXgGkvIfKYOJRdWFut5/pVnXI/vZvqJ7N+Tz9M1zUTXGWCZSom4az4AhqOuAahBwuhcKqMq/pYPW4h3cThvT2JaWVBw","hapy1CCe!"},
{"{x-isSHA512, 18009}C2+Sij3JyXPPDuQgsF6Zot7XnjRFX86X67tWJpUzXNnFw2dKcGPH6HDEzVJ8HN8+cJe4vZaOYTlmdz09gI7YEwECAwQFBgcICQoLDA0ODwA=","maxlen"},
{NULL}
};
/* per-candidate plaintext buffers, sized in init() */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
/* per-candidate computed hashes, truncated to BINARY_SIZE (16 bytes) */
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE/sizeof(ARCH_WORD_32)];
static struct sapH_salt {
int slen; /* actual length of salt ( 1 to 16 bytes) */
int type; /* 1, 256, 384 or 512 for sha1, sha256, sha384 or sha512 */
unsigned iter; /* from 1 to 2^32 rounds */
unsigned char s[SALT_LENGTH];
} *sapH_cur_salt;
/* Allocate the per-candidate key and hash buffers; under OpenMP, scale the
 * number of keys per crypt by thread count (times OMP_SCALE for the max). */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int threads = omp_get_max_threads();
	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(saved_plain);
}
/* Validate one ciphertext line: known signature, decimal iteration count,
 * well-formed MIME base64, and a decoded salt of 1..SALT_LENGTH bytes after
 * the fixed-size digest. Returns 1 when valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *cp = ciphertext;
char *keeptr;
int len, hash_len=0;
char tmp[MAX_BINARY_SIZE+SALT_LENGTH];
/* first check for 'simple' signatures before allocation other stuff. */
if (!strncmp(cp, FORMAT_TAG, FORMAT_TAG_LEN))
hash_len = SHA1_BINARY_SIZE;
else if (!strncmp(cp, FORMAT_TAG256, FORMAT_TAG256_LEN))
hash_len = SHA256_BINARY_SIZE;
else if (!strncmp(cp, FORMAT_TAG384, FORMAT_TAG384_LEN))
hash_len = SHA384_BINARY_SIZE;
else if (!strncmp(cp, FORMAT_TAG512, FORMAT_TAG512_LEN))
hash_len = SHA512_BINARY_SIZE;
else
return 0;
/* work on a copy, since strtokm() modifies its input */
keeptr = strdup(cp);
cp = keeptr;
while (*cp++ != ' ') ; /* skip the "{x-issha?, " */
if ((cp = strtokm(cp, "}")) == NULL)
goto err;
if (!isdecu(cp))
goto err;
// we want the entire rest of the line here, to mime compare.
if ((cp = strtokm(NULL, "")) == NULL)
goto err;
if (strlen(cp) != base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ|flg_Base64_MIME_TRAIL_EQ_CNT, 0))
goto err;
/* decode; whatever exceeds the digest length is the salt */
len = base64_convert(cp, e_b64_mime, strlen(cp), tmp, e_b64_raw,
sizeof(tmp), flg_Base64_MIME_TRAIL_EQ, 0);
len -= hash_len;
if (len < 1 || len > SALT_LENGTH)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/* Install the salt for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
sapH_cur_salt = (struct sapH_salt*)salt;
}
/* Store a candidate password. Use a bounded, always-NUL-terminating copy
 * (strnzcpy from misc.h) instead of a bare strcpy, so an over-long candidate
 * cannot overflow the PLAINTEXT_LENGTH+1 buffer. */
static void set_key(char *key, int index)
{
	strnzcpy(saved_plain[index], key, sizeof(saved_plain[index]));
}
/* Return the stored candidate password for reporting. */
static char *get_key(int index)
{
return (char*)saved_plain[index];
}
static int cmp_all(void *binary, int count) {
int index;
for (index = 0; index < count; index++)
if (*(ARCH_WORD_32*)binary == *(ARCH_WORD_32*)crypt_key[index])
return 1;
return 0;
}
/* cmp_one() already compares all BINARY_SIZE stored bytes; nothing more is
 * kept around to compare, so this is always a match. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Full comparison of the truncated (BINARY_SIZE) hash for one candidate. */
static int cmp_one(void * binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
/* SHA-1 variant: digest = sha1(pw.salt), then iter-1 rounds of
 * digest = sha1(pw.digest). First BINARY_SIZE bytes go to crypt_key. */
static void crypt_all_1(int count) {
int idx=0;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
for (idx = 0; idx < count; idx += NBKEYS1)
{
SHA_CTX ctx;
uint32_t i;
#if !defined (SIMD_COEF_32)
/* scalar path: keep "pw||digest" in tmp, digest overwritten in place */
uint32_t len = strlen(saved_plain[idx]);
unsigned char tmp[PLAINTEXT_LENGTH+SHA1_BINARY_SIZE], *cp=&tmp[len];
SHA1_Init(&ctx);
SHA1_Update(&ctx, saved_plain[idx], len);
SHA1_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
strcpy((char*)tmp, saved_plain[idx]);
len += SHA1_BINARY_SIZE;
SHA1_Final(cp, &ctx);
for (i = 1; i < sapH_cur_salt->iter; ++i) {
SHA1_Init(&ctx);
SHA1_Update(&ctx, tmp, len);
SHA1_Final(cp, &ctx);
}
memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
/* SIMD path: one flat 64-byte block per lane: pw||digest||0x80..bitlen
 * (pw+20 must fit in one block, hence PLAINTEXT_LENGTH 23) */
unsigned char _IBuf[64*NBKEYS1+MEM_ALIGN_SIMD], *keys, tmpBuf[20], _OBuf[20*NBKEYS1+MEM_ALIGN_SIMD], *crypt;
uint32_t j, *crypt32, offs[NBKEYS1], len;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
crypt32 = (uint32_t*)crypt;
memset(keys, 0, 64*NBKEYS1);
for (i = 0; i < NBKEYS1; ++i) {
/* seed each lane with sha1(pw.salt) computed with the scalar CTX */
len = strlen(saved_plain[idx+i]);
SHA1_Init(&ctx);
SHA1_Update(&ctx, saved_plain[idx+i], len);
SHA1_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
SHA1_Final(tmpBuf, &ctx);
memcpy(&keys[i<<6], saved_plain[idx+i], len);
memcpy(&keys[(i<<6)+len], tmpBuf, 20);
keys[(i<<6)+len+20] = 0x80;
offs[i] = len;
len += 20;
/* NOTE(review): bit length (len<<3) stored at bytes 60-61; assumes the
 * SSEi_FLAT_IN loader handles this layout -- confirm in simd-intrinsics */
keys[(i<<6)+60] = (len<<3)&0xff;
keys[(i<<6)+61] = (len>>5);
}
for (i = 1; i < sapH_cur_salt->iter; ++i) {
uint32_t k;
SIMDSHA1body(keys, crypt32, NULL, SSEi_FLAT_IN);
for (k = 0; k < NBKEYS1; ++k) {
/* write each lane's digest back over the digest part of its block */
uint32_t *pcrypt = &crypt32[ ((k/SIMD_COEF_32)*(SIMD_COEF_32*5)) + (k&(SIMD_COEF_32-1))];
uint32_t *Icp32 = (uint32_t *)(&keys[(k<<6)+offs[k]]);
for (j = 0; j < 5; ++j) {
Icp32[j] = JOHNSWAP(*pcrypt);
pcrypt += SIMD_COEF_32;
}
}
}
// now marshal into crypt_out;
for (i = 0; i < NBKEYS1; ++i) {
uint32_t *Optr32 = (uint32_t*)(crypt_key[idx+i]);
uint32_t *Iptr32 = &crypt32[ ((i/SIMD_COEF_32)*(SIMD_COEF_32*5)) + (i&(SIMD_COEF_32-1))];
// we only want 16 bytes, not 20
for (j = 0; j < 4; ++j) {
Optr32[j] = JOHNSWAP(*Iptr32);
Iptr32 += SIMD_COEF_32;
}
}
#endif
}
}
/* SHA-256 variant: same scheme as crypt_all_1 but with 32-byte digests. */
static void crypt_all_256(int count) {
int idx;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
for (idx = 0; idx < count; idx += NBKEYS256) {
SHA256_CTX ctx;
uint32_t i;
#if !defined (SIMD_COEF_32)
/* scalar path: iterate sha256 over "pw||digest" in place */
uint32_t len = strlen(saved_plain[idx]);
unsigned char tmp[PLAINTEXT_LENGTH+SHA256_BINARY_SIZE], *cp=&tmp[len];
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_plain[idx], len);
SHA256_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
strcpy((char*)tmp, saved_plain[idx]);
len += SHA256_BINARY_SIZE;
SHA256_Final(cp, &ctx);
for (i = 1; i < sapH_cur_salt->iter; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, tmp, len);
SHA256_Final(cp, &ctx);
}
memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
/* SIMD path: flat 64-byte blocks, pw||32-byte digest||0x80..bitlen */
unsigned char _IBuf[64*NBKEYS256+MEM_ALIGN_SIMD], *keys, tmpBuf[32], _OBuf[32*NBKEYS256+MEM_ALIGN_SIMD], *crypt;
uint32_t j, *crypt32, offs[NBKEYS256], len;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
crypt32 = (uint32_t*)crypt;
memset(keys, 0, 64*NBKEYS256);
for (i = 0; i < NBKEYS256; ++i) {
/* seed lane with sha256(pw.salt) */
len = strlen(saved_plain[idx+i]);
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_plain[idx+i], len);
SHA256_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
SHA256_Final(tmpBuf, &ctx);
memcpy(&keys[i<<6], saved_plain[idx+i], len);
memcpy(&keys[(i<<6)+len], tmpBuf, 32);
keys[(i<<6)+len+32] = 0x80;
offs[i] = len;
len += 32;
/* NOTE(review): bit length at bytes 60-61 as in crypt_all_1 -- confirm */
keys[(i<<6)+60] = (len<<3)&0xff;
keys[(i<<6)+61] = (len>>5);
}
for (i = 1; i < sapH_cur_salt->iter; ++i) {
uint32_t k;
SIMDSHA256body(keys, crypt32, NULL, SSEi_FLAT_IN);
for (k = 0; k < NBKEYS256; ++k) {
/* write each lane's 8-word digest back into its input block */
uint32_t *pcrypt = &crypt32[ ((k/SIMD_COEF_32)*(SIMD_COEF_32*8)) + (k&(SIMD_COEF_32-1))];
uint32_t *Icp32 = (uint32_t *)(&keys[(k<<6)+offs[k]]);
for (j = 0; j < 8; ++j) {
Icp32[j] = JOHNSWAP(*pcrypt);
pcrypt += SIMD_COEF_32;
}
}
}
// now marshal into crypt_out;
for (i = 0; i < NBKEYS256; ++i) {
uint32_t *Optr32 = (uint32_t*)(crypt_key[idx+i]);
uint32_t *Iptr32 = &crypt32[ ((i/SIMD_COEF_32)*(SIMD_COEF_32*8)) + (i&(SIMD_COEF_32-1))];
// we only want 16 bytes, not 32
for (j = 0; j < 4; ++j) {
Optr32[j] = JOHNSWAP(*Iptr32);
Iptr32 += SIMD_COEF_32;
}
}
#endif
}
}
/* SHA-384 variant: 48-byte digests, 128-byte SHA-512-family blocks; the
 * SIMD core runs SHA-512 with the SSEi_CRYPT_SHA384 flag. */
static void crypt_all_384(int count) {
int idx;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
for (idx = 0; idx < count; idx+=NBKEYS512) {
SHA512_CTX ctx;
uint32_t i;
#if !defined SIMD_COEF_64
/* scalar path: iterate sha384 over "pw||digest" in place */
uint32_t len = strlen(saved_plain[idx]);
unsigned char tmp[PLAINTEXT_LENGTH+SHA384_BINARY_SIZE], *cp=&tmp[len];
SHA384_Init(&ctx);
SHA384_Update(&ctx, saved_plain[idx], len);
SHA384_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
strcpy((char*)tmp, saved_plain[idx]);
len += SHA384_BINARY_SIZE;
SHA384_Final(cp, &ctx);
for (i = 1; i < sapH_cur_salt->iter; ++i) {
SHA384_Init(&ctx);
SHA384_Update(&ctx, tmp, len);
SHA384_Final(cp, &ctx);
}
memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
/* SIMD path: one flat 128-byte block per lane, pw||48-byte digest */
unsigned char _IBuf[128*NBKEYS512+MEM_ALIGN_SIMD], *keys, tmpBuf[64], _OBuf[64*NBKEYS512+MEM_ALIGN_SIMD], *crypt;
ARCH_WORD_64 j, *crypt64, offs[NBKEYS512];
uint32_t len;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
crypt64 = (ARCH_WORD_64*)crypt;
memset(keys, 0, 128*NBKEYS512);
for (i = 0; i < NBKEYS512; ++i) {
/* seed lane with sha384(pw.salt) */
len = strlen(saved_plain[idx+i]);
SHA384_Init(&ctx);
SHA384_Update(&ctx, saved_plain[idx+i], len);
SHA384_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
SHA384_Final(tmpBuf, &ctx);
memcpy(&keys[i<<7], saved_plain[idx+i], len);
memcpy(&keys[(i<<7)+len], tmpBuf, 48);
keys[(i<<7)+len+48] = 0x80;
offs[i] = len;
len += 48;
/* NOTE(review): bit length at bytes 120-121 of the 128-byte block;
 * assumes SSEi_FLAT_IN handles this layout -- confirm */
keys[(i<<7)+120] = (len<<3)&0xff;
keys[(i<<7)+121] = (len>>5);
}
for (i = 1; i < sapH_cur_salt->iter; ++i) {
uint32_t k;
SIMDSHA512body(keys, crypt64, NULL, SSEi_FLAT_IN|SSEi_CRYPT_SHA384);
for (k = 0; k < NBKEYS512; ++k) {
/* only the first 6 of 8 output words (48 bytes) feed the next round */
ARCH_WORD_64 *pcrypt = &crypt64[ ((k/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (k&(SIMD_COEF_64-1))];
ARCH_WORD_64 *Icp64 = (ARCH_WORD_64 *)(&keys[(k<<7)+offs[k]]);
for (j = 0; j < 6; ++j) {
Icp64[j] = JOHNSWAP64(*pcrypt);
pcrypt += SIMD_COEF_64;
}
}
}
// now marshal into crypt_out;
for (i = 0; i < NBKEYS512; ++i) {
ARCH_WORD_64 *Optr64 = (ARCH_WORD_64*)(crypt_key[idx+i]);
ARCH_WORD_64 *Iptr64 = &crypt64[ ((i/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (i&(SIMD_COEF_64-1))];
// we only want 16 bytes, not 48
for (j = 0; j < 2; ++j) {
Optr64[j] = JOHNSWAP64(*Iptr64);
Iptr64 += SIMD_COEF_64;
}
}
#endif
}
}
/* SHA-512 variant: 64-byte digests, 128-byte blocks. */
static void crypt_all_512(int count) {
int idx;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
for (idx = 0; idx < count; idx+=NBKEYS512) {
SHA512_CTX ctx;
uint32_t i;
#if !defined SIMD_COEF_64
/* scalar path: iterate sha512 over "pw||digest" in place */
uint32_t len = strlen(saved_plain[idx]);
unsigned char tmp[PLAINTEXT_LENGTH+SHA512_BINARY_SIZE], *cp=&tmp[len];
SHA512_Init(&ctx);
SHA512_Update(&ctx, saved_plain[idx], len);
SHA512_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
strcpy((char*)tmp, saved_plain[idx]);
len += SHA512_BINARY_SIZE;
SHA512_Final(cp, &ctx);
for (i = 1; i < sapH_cur_salt->iter; ++i) {
SHA512_Init(&ctx);
SHA512_Update(&ctx, tmp, len);
SHA512_Final(cp, &ctx);
}
memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
/* SIMD path: one flat 128-byte block per lane, pw||64-byte digest */
unsigned char _IBuf[128*NBKEYS512+MEM_ALIGN_SIMD], *keys, tmpBuf[64], _OBuf[64*NBKEYS512+MEM_ALIGN_SIMD], *crypt;
ARCH_WORD_64 j, *crypt64, offs[NBKEYS512];
uint32_t len;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
crypt64 = (ARCH_WORD_64*)crypt;
memset(keys, 0, 128*NBKEYS512);
for (i = 0; i < NBKEYS512; ++i) {
/* seed lane with sha512(pw.salt) */
len = strlen(saved_plain[idx+i]);
SHA512_Init(&ctx);
SHA512_Update(&ctx, saved_plain[idx+i], len);
SHA512_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
SHA512_Final(tmpBuf, &ctx);
memcpy(&keys[i<<7], saved_plain[idx+i], len);
memcpy(&keys[(i<<7)+len], tmpBuf, 64);
keys[(i<<7)+len+64] = 0x80;
offs[i] = len;
len += 64;
/* NOTE(review): bit length at bytes 120-121 of the 128-byte block;
 * assumes SSEi_FLAT_IN handles this layout -- confirm */
keys[(i<<7)+120] = (len<<3)&0xff;
keys[(i<<7)+121] = (len>>5);
}
for (i = 1; i < sapH_cur_salt->iter; ++i) {
uint32_t k;
SIMDSHA512body(keys, crypt64, NULL, SSEi_FLAT_IN);
for (k = 0; k < NBKEYS512; ++k) {
/* write the full 8-word digest back into the lane's input block */
ARCH_WORD_64 *pcrypt = &crypt64[ ((k/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (k&(SIMD_COEF_64-1))];
ARCH_WORD_64 *Icp64 = (ARCH_WORD_64 *)(&keys[(k<<7)+offs[k]]);
for (j = 0; j < 8; ++j) {
Icp64[j] = JOHNSWAP64(*pcrypt);
pcrypt += SIMD_COEF_64;
}
}
}
// now marshal into crypt_out;
for (i = 0; i < NBKEYS512; ++i) {
ARCH_WORD_64 *Optr64 = (ARCH_WORD_64*)(crypt_key[idx+i]);
ARCH_WORD_64 *Iptr64 = &crypt64[((i/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (i&(SIMD_COEF_64-1))];
// we only want 16 bytes, not 64
for (j = 0; j < 2; ++j) {
Optr64[j] = JOHNSWAP64(*Iptr64);
Iptr64 += SIMD_COEF_64;
}
}
#endif
}
}
/*
 * Dispatch on the current salt's hash variant (1=sha1, 2=sha256, 3=sha384,
 * 4=sha512); each variant lives in its own helper to keep the OMP + SIMD
 * logic simple. Returns the (unchanged) candidate count.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;

	if (sapH_cur_salt->type == 1)
		crypt_all_1(count);
	else if (sapH_cur_salt->type == 2)
		crypt_all_256(count);
	else if (sapH_cur_salt->type == 3)
		crypt_all_384(count);
	else if (sapH_cur_salt->type == 4)
		crypt_all_512(count);

	return count;
}
/* Decode the base64 payload and return the first BINARY_SIZE bytes of the
 * digest (the salt that follows is simply not copied, since the decode is
 * bounded by sizeof(b.cp)). Returns a pointer to static storage. */
static void *get_binary(char *ciphertext)
{
static union {
unsigned char cp[BINARY_SIZE]; /* only stores part the size of each hash */
ARCH_WORD_32 jnk[BINARY_SIZE/4];
} b;
char *cp = ciphertext;
memset(b.cp, 0, sizeof(b.cp));
if (!strncasecmp(cp, FORMAT_TAG, FORMAT_TAG_LEN)) { cp += FORMAT_TAG_LEN; }
else if (!strncasecmp(cp, FORMAT_TAG256, FORMAT_TAG256_LEN)) { cp += FORMAT_TAG256_LEN; }
else if (!strncasecmp(cp, FORMAT_TAG384, FORMAT_TAG384_LEN)) { cp += FORMAT_TAG384_LEN; }
else if (!strncasecmp(cp, FORMAT_TAG512, FORMAT_TAG512_LEN)) { cp += FORMAT_TAG512_LEN; }
else { fprintf(stderr, "error, bad signature in sap-H format!\n"); error(); }
/* skip past the iteration count to the base64 data */
while (*cp != '}') ++cp;
++cp;
base64_convert(cp, e_b64_mime, strlen(cp), b.cp, e_b64_raw,
sizeof(b.cp), flg_Base64_MIME_TRAIL_EQ, 0);
return b.cp;
}
/* Parse the ciphertext into a sapH_salt: hash type, iteration count, and
 * the salt bytes that follow the digest in the decoded payload.
 * Returns a pointer to static storage. */
static void *get_salt(char *ciphertext)
{
static struct sapH_salt s;
char *cp = ciphertext;
unsigned char tmp[MAX_BINARY_SIZE+SALT_LENGTH];
int total_len, hash_len = 0;
memset(&s, 0, sizeof(s));
if (!strncasecmp(cp, FORMAT_TAG, FORMAT_TAG_LEN)) { s.type = 1; cp += FORMAT_TAG_LEN; hash_len = SHA1_BINARY_SIZE; }
else if (!strncasecmp(cp, FORMAT_TAG256, FORMAT_TAG256_LEN)) { s.type = 2; cp += FORMAT_TAG256_LEN; hash_len = SHA256_BINARY_SIZE; }
else if (!strncasecmp(cp, FORMAT_TAG384, FORMAT_TAG384_LEN)) { s.type = 3; cp += FORMAT_TAG384_LEN; hash_len = SHA384_BINARY_SIZE; }
else if (!strncasecmp(cp, FORMAT_TAG512, FORMAT_TAG512_LEN)) { s.type = 4; cp += FORMAT_TAG512_LEN; hash_len = SHA512_BINARY_SIZE; }
else { fprintf(stderr, "error, bad signature in sap-H format!\n"); error(); }
sscanf (cp, "%u", &s.iter);
while (*cp != '}') ++cp;
++cp;
/* decode digest+salt; salt is whatever follows the fixed-size digest */
total_len = base64_convert(cp, e_b64_mime, strlen(cp), tmp, e_b64_raw,
sizeof(tmp), flg_Base64_MIME_TRAIL_EQ, 0);
s.slen = total_len-hash_len;
memcpy(s.s, &tmp[hash_len], s.slen);
return &s;
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	/* We could case-canonicalize the SHA/sha signature here. If real-world
	 * hashes turn out to vary in case, this will need to do so. */
	return ciphertext;
}
/* Standard JtR hash-table probes: successively wider masks over the first
 * 32 bits of each computed hash. */
static int get_hash_0(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_6; }
/* djb2-xor hash over the raw bytes of the salt struct, folded into the
 * salt-table size. */
static int salt_hash(void *salt)
{
	const unsigned char *bytes = (const unsigned char*)salt;
	unsigned int h = 5381;
	unsigned int i;

	for (i = 0; i < sizeof(struct sapH_salt); i++)
		h = (h * 33) ^ bytes[i];
	return h & (SALT_HASH_SIZE - 1);
}
/* Tunable-cost reporting: the hash variant (1, 2, 3 or 4). */
static unsigned int sapH_type(void *salt)
{
	return ((struct sapH_salt *)salt)->type;
}
/* Tunable-cost reporting: the salt's iteration count. */
static unsigned int iteration_count(void *salt)
{
	return ((struct sapH_salt *)salt)->iter;
}
/* Format registration: parameters, signatures and method table. */
struct fmt_main fmt_sapH = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_OMP | FMT_CASE | FMT_8_BIT | FMT_UTF8,
{
/* tunable-cost descriptions, matching sapH_type/iteration_count below */
"hash type [1:sha1 2:SHA256 3:SHA384 4:SHA512]",
"iteration count",
},
{ FORMAT_TAG, FORMAT_TAG256, FORMAT_TAG384, FORMAT_TAG512 },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{
sapH_type,
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
test_bls.c | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
/*
Sign a message and verify the signature
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <amcl/utils.h>
#include <amcl/randapi.h>
#include <amcl/bls_BLS381.h>
#include <amcl/pqnist.h>
#define NTHREADS 8
#define MAXSIZE 256
#define G2LEN 4*BFS_BLS381
#define SIGLEN BFS_BLS381+1
/*
 * Sign NTHREADS messages with per-thread BLS keys and verify them.
 *
 * Fixes vs. original:
 *  - `rc` was a single variable shared by every thread of the
 *    `#pragma omp parallel for` loops (a data race); each loop now uses a
 *    thread-private `rc` declared inside the loop body.
 *  - bzero() (removed from POSIX.1-2008) replaced with memset().
 */
int main()
{
    int i;

    // Seed value for CSPRNG (thread i is filled with the repeated byte i)
    char seed[NTHREADS][PQNIST_SEED_LENGTH];

    // Message to be sent to Bob
    char p[NTHREADS][MAXSIZE];
    octet P[NTHREADS];

    // BLS signature
    char s[NTHREADS][SIGLEN];
    octet S[NTHREADS];

    // Initialise seed
    for(i=0; i<NTHREADS; i++)
    {
        for(int j=0; j<PQNIST_SEED_LENGTH; j++)
        {
            seed[i][j] = i;
        }
    }

    // Generate BLS keys
    // Alice's BLS keys
    char BLSsk[NTHREADS][BGS_BLS381];
    char BLSpk[NTHREADS][G2LEN];

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // rc is declared inside the loop so every thread has its own copy
        int rc = pqnist_bls_keys(seed[i], BLSpk[i], BLSsk[i]);
        if (rc)
        {
            fprintf(stderr, "FAILURE pqnist_keys rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
        printf("BLS pklen %d pk: ", G2LEN);
        amcl_print_hex(BLSpk[i], G2LEN);
        printf("BLS sklen %d BLS sk: ", BGS_BLS381);
        amcl_print_hex(BLSsk[i], BGS_BLS381);
        printf("\n");
    }

    // Alice: build one plaintext message per thread
    for(i=0; i<NTHREADS; i++)
    {
        memset(p[i], 0, sizeof(p[i]));   /* was bzero(); removed from POSIX */
        P[i].max = MAXSIZE;
        P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i);
        P[i].val = p[i];
        printf("Alice Plaintext: ");
        OCT_output_string(&P[i]);
        printf("\n");
    }

    // Prepare the signature octets
    for(i=0; i<NTHREADS; i++)
    {
        memset(s[i], 0, sizeof(s[i]));
        S[i].max = SIGLEN;
        S[i].len = SIGLEN;
        S[i].val = s[i];
    }

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // Alice signs message (rc private per thread, as above)
        int rc = pqnist_bls_sign(P[i].val, BLSsk[i], S[i].val);
        if(rc)
        {
            fprintf(stderr, "FAILURE pqnist_bls_sign rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
        printf("Alice SIGlen %d SIG", S[i].len);
        OCT_output(&S[i]);
        printf("\n");
    }

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // Bob verifies message
        int rc = pqnist_bls_verify(P[i].val, BLSpk[i], S[i].val);
        if (rc)
        {
            fprintf(stderr, "FAILURE pqnist_bls_verify rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
        else
        {
            printf("Test %d pqnist_bls_verify rc: %d\n", i, rc);
            OCT_output_string(&P[i]);
            printf("\n");
        }
    }

    // clear memory
    for(i=0; i<NTHREADS; i++)
    {
        OCT_clear(&P[i]);
        OCT_clear(&S[i]);
    }

    printf("SUCCESS\n");
    exit(EXIT_SUCCESS);
}
|
serialized.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
#include <math.h>
// OMPT serialized-task test driver. The `// CHECK` comments are consumed by
// LLVM FileCheck (see the RUN line at the top of the file) and must not be
// edited; only explanatory comments have been added here.
int main()
{
omp_set_nested(0);
print_frame(0);
#pragma omp parallel num_threads(2)
{
print_frame(1);
print_ids(0);
print_ids(1);
print_frame(0);
#pragma omp master
{
print_ids(0);
// sin(0.1) truncates to 0, so if(0) forces an undeferred (serialized)
// task -- the scenario this test exercises. Computed at runtime so the
// compiler cannot fold the task clause away.
int t = (int)sin(0.1);
#pragma omp task if(t)
{
print_frame(1);
print_ids(0);
print_ids(1);
print_ids(2);
}
print_ids(0);
}
print_ids(0);
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_barrier_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_barrier_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_wait_barrier_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_wait_barrier_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_task_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_task_switch'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_task_end'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id={{[0-9]+}}, parallel_function=0x{{[0-f]+}}, task_type=ompt_task_initial=1, has_dependences=no
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[MAIN_REENTER]], parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
// nested parallel masters
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(1)=[[EXIT:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=[[REENTER]], new_task_id=[[TASK_ID:[0-9]+]], parallel_function=[[TASK_FUNCTION:0x[0-f]+]]
// <- ompt_event_task_schedule ([[IMPLICIT_TASK_ID]], [[TASK_ID]]) would be expected here
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(1)=[[TASK_EXIT:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[REENTER]]
// CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
// <- ompt_event_task_schedule ([[TASK_ID]], [[IMPLICIT_TASK_ID]]) would be expected here
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reen
// implicit barrier parallel
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(1)=[[EXIT:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
return 0;
}
|
[Gyan]-10_omp_producer_consumer_unbounded.c | #include<stdio.h>
#include<stdlib.h>
#include<omp.h>
/* Singly-linked FIFO queue node. */
struct node
{
int dat;
struct node *next;
};
typedef struct node node;

/* Number of items produced so far; only mutated inside the producer's
 * critical section. */
int count = 0;

/* Prototypes now match the definitions below: the original declared
 * producer/consumer with empty parentheses (unspecified arguments) while
 * defining and calling them with a node** argument. */
int enqueue(node **head, int d);
int dequeue(node **head);
void disp(node *head);
void producer(node **head);
void consumer(node **head);
/*
 * Two-thread producer/consumer over an unbounded queue: thread 0 produces
 * one item per keypress, thread 1 consumes one per keypress.
 *
 * Fix vs. original: `id` was a single variable shared by both threads, so
 * one thread could observe the other's omp_get_thread_num() result (data
 * race) and both could take the same branch. It is now declared inside the
 * parallel region, making it private per thread.
 */
int main()
{
    node *head = NULL;

    omp_set_dynamic(0);
    #pragma omp parallel num_threads(2)
    {
        int id = omp_get_thread_num();  /* private: declared inside region */
        if(id == 0)
        {
            while(1)
            {
                producer(&head);
                fgetc(stdin);
            }
        }
        else
        {
            while(1)
            {
                consumer(&head);
                fgetc(stdin);
            }
        }
    }
    return 0;
}
/* Append one new item (value count+1) to the shared queue; the critical
 * section serializes access against the consumer thread. */
void producer(node **head)
{
    #pragma omp critical
    {
        if (enqueue(head, count + 1))
        {
            count++;
            printf("produced %d \n", count);
        }
        else
        {
            printf("producer failed! memory full! \n");
        }
    }
}
/* Remove one item from the front of the shared queue; the critical section
 * serializes access against the producer thread. */
void consumer(node **head)
{
    #pragma omp critical
    {
        int item = dequeue(head);
        if (item)
            printf("consumed %d \n", item);
        else
            printf("nothing left to consume!! \n");
    }
}
/* Append value d at the tail of the list. Returns 1 on success,
 * 0 if allocation fails. Walks a pointer-to-pointer so the empty-list
 * and append cases share one code path. */
int enqueue(node **head, int d)
{
    node *fresh = malloc(sizeof *fresh);
    if (fresh == NULL)      /* allocation failed */
        return 0;
    fresh->dat = d;
    fresh->next = NULL;

    node **link = head;
    while (*link != NULL)
        link = &(*link)->next;
    *link = fresh;
    return 1;
}
/* Pop the front value of the list and free its node. Returns the value,
 * or 0 when the list is empty (0 is therefore unusable as real data --
 * matching the original contract). */
int dequeue(node **head)
{
    node *front = *head;
    if (front == NULL)      /* empty */
        return 0;

    int value = front->dat;
    *head = front->next;
    free(front);
    return value;
}
/* Debug helper: print the queue contents front to back. Not called from
 * main in this file. */
void disp(node *head)
{
node *p = head;
printf("List : ");
while(p != NULL)
{
printf("%d ", p->dat);
p = p->next;
}
printf("\n");
} |
vdtactivelist.h | #ifndef _VDT_ACTIVE_LIST_
#define _VDT_ACTIVE_LIST_
#define FALSE 0
#define TRUE 1
#ifndef NULL
#define NULL 0
#endif
#include <stdio.h>
#include <stdlib.h>
// Intrusive doubly-linked list with caller-supplied sentinel nodes.
// ptrDataType should be a pointer; its pointee must expose public
// m_pPrev / m_pNext link members (intrusive links).
// NOTE(review): End() is declared below but no definition appears in this
// header -- confirm it is defined elsewhere or unused.
template <class ptrDataType>
class CActiveList
{
public :
// Sentinels: m_pStart precedes the first element, m_pEnd follows the last.
ptrDataType m_pStart, m_pEnd;
int m_Size;
// iterator pointer
ptrDataType m_pCurrentNode;
// Mark
ptrDataType m_pMark; // store one node for user-defined purpose.
inline CActiveList (void);
inline ~CActiveList (void);
inline void InitList (ptrDataType pStart, ptrDataType pEnd);
inline int Delete (ptrDataType pNode);
inline void Clear(bool deleteNodes = false);
inline void Add (ptrDataType pNode);
inline void ForceAdd (ptrDataType pNode);
inline void AddNext (ptrDataType pPivotNode, ptrDataType pNode);
inline void AddBefore (ptrDataType pPivotNode, ptrDataType pNode);
inline void AddatEnd (ptrDataType pNode);
inline ptrDataType Head (void);
inline ptrDataType End (void);
inline int IsEmpty (void);
inline int Size (void);
// operation with Mark
inline int DeleteWithMark (ptrDataType pNode);
inline void SetMark (ptrDataType pNode);
inline ptrDataType GetMark (void);
// simple iterator.
inline void InitIteration (void);
inline void InitIteration (ptrDataType pNode);
inline void SetCurrent (ptrDataType pNode);
inline int IsEnd (void);
inline ptrDataType GetCurrent (void);
inline void Advance (void);
inline void BackAdvance (void);
};
// Default-construct an empty, unlinked list.
// Fix vs. original: m_Size, m_pCurrentNode and m_pMark were left
// indeterminate until InitList(), so calling e.g. Size() on a freshly
// constructed list read uninitialized memory (undefined behavior).
template <class ptrDataType>
inline CActiveList <ptrDataType>::CActiveList (void)
{
    m_pStart = m_pEnd = NULL;
    m_Size = 0;
    m_pCurrentNode = NULL;
    m_pMark = NULL;
}
// Destroy the list, deleting the sentinel nodes.
// `delete` on a null pointer is a no-op, so the original's NULL guards
// were redundant and have been removed.
// NOTE(review): this assumes the list owns the sentinels handed to
// InitList() -- confirm callers allocate them with new and never free
// them themselves.
template <class ptrDataType>
inline CActiveList <ptrDataType>::~CActiveList (void)
{
    delete m_pStart;
    m_pStart = NULL;
    delete m_pEnd;
    m_pEnd = NULL;
}
// to use this class, we have create Start and End node. then assign the pointers of this
// to this function.
// Adopts the two caller-created sentinels and links them into an empty list.
// NOTE(review): the destructor deletes these sentinels -- confirm callers
// allocate them with new and do not also free them.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::InitList (ptrDataType pStart, ptrDataType pEnd)
{
m_pStart = pStart;
m_pEnd = pEnd;
// make a double list.
m_pStart->m_pPrev = NULL;
m_pStart->m_pNext = m_pEnd;
m_pEnd->m_pPrev = m_pStart;
m_pEnd->m_pNext = NULL;
m_Size = 0;
m_pCurrentNode = 0;
m_pMark = 0;
}
// The list is empty exactly when the two sentinels are adjacent.
template <class ptrDataType>
inline int CActiveList <ptrDataType>::IsEmpty (void)
{
    return (m_pStart->m_pNext == m_pEnd) ? TRUE : FALSE;
}
// Unlink pNode from the list. Membership is detected through the node's
// own link pointers: a node outside any list has both set to NULL.
// Returns TRUE if the node was removed, FALSE if it was not linked.
template <class ptrDataType>
inline int CActiveList <ptrDataType>::Delete (ptrDataType pNode)
{
if (pNode->m_pNext == NULL || pNode->m_pPrev == NULL) // if this isn't an active one.
return FALSE;
// Keep the iterator usable: if the current node is being deleted, step it
// back so a subsequent Advance() lands on the node after the deleted one.
if (pNode == m_pCurrentNode)
SetCurrent (m_pCurrentNode->m_pPrev);
pNode->m_pPrev->m_pNext = pNode->m_pNext;
pNode->m_pNext->m_pPrev = pNode->m_pPrev;
pNode->m_pPrev = NULL;
pNode->m_pNext = NULL;
m_Size--;
return TRUE;
}
// Reset the list to empty, optionally deleting the element nodes
// (the sentinels are always kept).
// NOTE(review): cleared elements' link pointers are not reset to NULL here
// (unlike Delete), and m_pCurrentNode / m_pMark are left untouched, so they
// may dangle after a Clear(true) -- confirm callers re-seed iterator/mark.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::Clear (bool deleteNodes)
{
m_Size = 0;
if (deleteNodes)
{
ptrDataType cur, next;
cur = m_pStart->m_pNext;
while (0 != cur && m_pEnd != cur)
{
next = cur->m_pNext;
delete cur;
cur = next;
}
}
m_pEnd->m_pPrev = m_pStart;
m_pStart->m_pNext = m_pEnd;
}
// if Mark is deleted, changed Mark into next one.
// Same unlinking as Delete(), but keeps m_pMark valid by advancing it past
// pNode. Note: unlike Delete(), this does NOT adjust m_pCurrentNode.
template <class ptrDataType>
inline int CActiveList <ptrDataType>::DeleteWithMark (ptrDataType pNode)
{
if (pNode->m_pNext == NULL || pNode->m_pPrev == NULL) // temporary solution.
return FALSE;
if (pNode == m_pMark) { // user-purpose.
m_pMark = m_pMark->m_pNext;
}
pNode->m_pPrev->m_pNext = pNode->m_pNext;
pNode->m_pNext->m_pPrev = pNode->m_pPrev;
pNode->m_pPrev = NULL;
pNode->m_pNext = NULL;
m_Size--;
return TRUE; // it means it delete element.
}
// Remember one node for a caller-defined purpose (see DeleteWithMark).
template <class ptrDataType>
inline void CActiveList <ptrDataType>::SetMark (ptrDataType pNode)
{
m_pMark = pNode;
}
// Retrieve the remembered node (may be NULL/stale if never set).
template <class ptrDataType>
inline ptrDataType CActiveList <ptrDataType>::GetMark (void)
{
return m_pMark;
}
// Insert pNode at the front (immediately after the start sentinel).
// A node already linked anywhere (m_pNext != NULL) is silently ignored.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::Add (ptrDataType pNode)
{
if (pNode->m_pNext != NULL) // already inserted in list
return;
// add node after m_Start, which is a root node
pNode->m_pNext = m_pStart->m_pNext;
pNode->m_pPrev = m_pStart;
pNode->m_pNext->m_pPrev = pNode;
m_pStart->m_pNext = pNode;
m_Size++;
}
// Front-insert like Add(), but an already-linked node is first unlinked so
// it always ends up at the head. Serialized via an OpenMP critical section
// when built with _USE_OPENMP.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::ForceAdd (ptrDataType pNode)
{
#ifdef _USE_OPENMP
#pragma omp critical
#endif
{
if (pNode->m_pNext != NULL) { // already inserted in list
Delete (pNode);
}
// add node after m_Start, which is a root node
pNode->m_pNext = m_pStart->m_pNext;
pNode->m_pPrev = m_pStart;
pNode->m_pNext->m_pPrev = pNode;
m_pStart->m_pNext = pNode;
m_Size++;
}
}
// Insert pNode at the back (immediately before the end sentinel).
// A node already linked anywhere (m_pNext != NULL) is silently ignored.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::AddatEnd (ptrDataType pNode)
{
if (pNode->m_pNext != NULL) // already inserted in list
return;
// add node before m_pEnd, which is a root node
pNode->m_pNext = m_pEnd;
pNode->m_pPrev = m_pEnd->m_pPrev;
m_pEnd->m_pPrev->m_pNext = pNode;
m_pEnd->m_pPrev = pNode;
m_Size++;
}
// Insert pNode immediately after pPivotNode (pivot is assumed linked).
// A node already linked anywhere (m_pNext != NULL) is silently ignored.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::AddNext (ptrDataType pPivotNode, ptrDataType pNode)
{
if (pNode->m_pNext != NULL) { // already inserted in list
// printf ("To check if it might be unnecessary code.\n");
// exit (-1);
return;
}
// add node after m_pPivotNode, which is a root node
pNode->m_pNext = pPivotNode->m_pNext;
pNode->m_pPrev = pPivotNode;
pNode->m_pNext->m_pPrev = pNode;
pPivotNode->m_pNext = pNode;
m_Size++;
}
// Insert pNode immediately before pPivotNode (pivot is assumed linked).
// A node already linked anywhere (m_pNext != NULL) is silently ignored.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::AddBefore (ptrDataType pPivotNode, ptrDataType pNode)
{
if (pNode->m_pNext != NULL) { // already inserted in list
// printf ("To check if it might be unnecessary code.\n");
// exit (-1);
return;
}
// add node before m_pPivotNode
//
pNode->m_pNext = pPivotNode;
pNode->m_pPrev = pPivotNode->m_pPrev;
pPivotNode->m_pPrev->m_pNext = pNode;
pPivotNode->m_pPrev = pNode;
m_Size++;
}
// First real element; equals m_pEnd when the list is empty.
template <class ptrDataType>
inline ptrDataType CActiveList <ptrDataType>::Head (void)
{
return m_pStart->m_pNext;
}
// Current number of linked elements (excluding sentinels).
template <class ptrDataType>
inline int CActiveList <ptrDataType>::Size (void)
{
return m_Size;
}
// Begin iterating from the first element.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::InitIteration (void)
{
ptrDataType pRootNode = Head ();
//SetCurrent (pRootNode);
// above code produce message if list is empty.
m_pCurrentNode = pRootNode;
}
// Begin iterating from a caller-chosen node.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::InitIteration (ptrDataType pNode)
{
ptrDataType pRootNode = pNode;
//SetCurrent (pRootNode);
// above code produce message if list is empty.
m_pCurrentNode = pRootNode;
}
// Reposition the iterator; in DEBUG_MODE, aborts on an unlinked node.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::SetCurrent (ptrDataType pNode)
{
#ifdef DEBUG_MODE
if (pNode->m_pNext == NULL) {
printf ("Error : Invalid Current Node\n");
exit (-1);
}
#endif
m_pCurrentNode = pNode;
}
// True once the iterator has reached the end sentinel.
template <class ptrDataType>
inline int CActiveList <ptrDataType>::IsEnd (void)
{
if (m_pCurrentNode == m_pEnd)
return TRUE;
return FALSE;
}
template <class ptrDataType>
inline ptrDataType CActiveList <ptrDataType>::GetCurrent (void)
{
return m_pCurrentNode;
}
// Step the iterator forward (no bounds check past the end sentinel).
template <class ptrDataType>
inline void CActiveList <ptrDataType>::Advance (void)
{
m_pCurrentNode = m_pCurrentNode->m_pNext;
}
// Step the iterator backward (no bounds check past the start sentinel).
template <class ptrDataType>
inline void CActiveList <ptrDataType>::BackAdvance (void)
{
m_pCurrentNode = m_pCurrentNode->m_pPrev;
}
#endif
|
ejercicio9a.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
//#define PRINTF_ALL
/* Multiplica una matriz f x c por un vector usando OpenMP.
 *
 * Correcciones respecto al original:
 *  - v1 (vector de entrada) tiene c componentes y v2 (resultado) f; el
 *    original reservaba f para ambos y los bucles de reserva/liberacion
 *    de filas iteraban hasta c en vez de f, con accesos fuera de rango
 *    siempre que f != c.
 *  - el printf final leia v1[f-1]; el indice correcto es v1[c-1].
 *  - cada hilo escribe un v2[i] distinto, asi que el critical sobraba.
 *  - main declara tipo de retorno int y devuelve 0.
 */
int main(int argc, char **argv)
{
    if(argc < 3) {
        fprintf(stderr,"Falta fila y columna\n");
        exit(-1);
    }
    struct timespec cgt1,cgt2; double ncgt; //para tiempo de ejecución
    int i,k, f = atoi(argv[1]);
    int c = atoi(argv[2]);

    double *v1 = (double*)malloc(c*sizeof(double));   // vector de entrada (c)
    double *v2 = (double*)malloc(f*sizeof(double));   // vector resultado (f)
    double **m = (double**)malloc(f*sizeof(double*)); // f punteros a fila
    if (v1 == NULL || v2 == NULL || m == NULL) {
        fprintf(stderr,"Error al reservar memoria\n");
        exit(-1);
    }

    //Inicializo v1 (c componentes)
    #pragma omp parallel for
    for(i=0;i<c;++i)
        v1[i]=2;

    //Reservo las filas e inicializo la matriz (f filas de c columnas)
    #pragma omp parallel for private(k)
    for (i=0; i<f; i++){
        m[i]=(double*)malloc(c*sizeof(double));
        for(k=0;k<c;++k)
            m[i][k]=2;
    }

    //Calculo la multiplicacion de la matriz por el vector y obtengo el tiempo
    clock_gettime(CLOCK_REALTIME,&cgt1);
    #pragma omp parallel for private(k)
    for (i=0; i<f; i++){
        double sumalocal=0;         // acumulador privado por iteracion
        for(k=0;k<c;++k)
            sumalocal+=m[i][k]*v1[k];
        v2[i]=sumalocal;            // cada hilo escribe un i distinto
    }
    clock_gettime(CLOCK_REALTIME,&cgt2);
    ncgt=(double) (cgt2.tv_sec-cgt1.tv_sec)+(double) ((cgt2.tv_nsec-cgt1.tv_nsec)/(1.e+9));

    //Imprimo los resultados
    #ifdef PRINTF_ALL
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\n",ncgt,f);
    for (i=0; i<f; i++){
        for(k=0;k<c;++k){
            printf("/ m[%d][%d]*V1[%d]=v2[%i] (%8.6f*%8.6f=%8.6f) /\n",i,k,k,i,m[i][k],v1[k],v2[i]);
        }
    }
    #else
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\t/ m[0][0]*V1[0]=V2[0](%8.6f+%8.6f=%8.6f) // m[%d][%d]*V1[%d]=V2[%d](%8.6f+%8.6f=%8.6f) /\n", ncgt,f,m[0][0],v1[0],v2[0],f-1,c-1,c-1,f-1,m[f-1][c-1],v1[c-1],v2[f-1]);
    #endif

    free(v1); // libera el espacio reservado para v1
    free(v2); // libera el espacio reservado para v2
    for(i=0;i<f;++i){ // f filas reservadas, f filas liberadas
        free(m[i]);
    }
    free(m); // libera el espacio reservado para m
    return 0;
}
|
par_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterp
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
{
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
//fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; */
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
{
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
}
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
{
P_marker[i] = 0;
}
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
{
if (CF_marker[i] == -3) CF_marker[i] = -1;
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpHE
* interpolation routine for hyperbolic PDEs
* treats weak fine connections like strong fine connections
*--------------------------------------------------------------------------*/
/*
 * hypre_BoomerAMGBuildInterpHE
 *
 * Builds the classical-AMG prolongation (interpolation) operator P for A,
 * using the C/F splitting in CF_marker and the strength matrix S.  The "HE"
 * variant differs from hypre_BoomerAMGBuildInterp in that every F-neighbor
 * of an F-point is distributed (treated as a strong connection); there is no
 * strong_f_marker and no "weak connection into the diagonal" case for
 * F-neighbors.
 *
 * Algorithm (two passes over the fine grid, each parallelized by splitting
 * rows into num_threads contiguous chunks):
 *   Pass 1: count nonzeros of P per thread and build the local
 *           fine_to_coarse index map.
 *   Pass 2: fill P row by row; for an F-point row, C-neighbors get the
 *           matrix entry a_{i,i1} directly, and each F-neighbor's entry is
 *           distributed among the C-points that strongly influence i,
 *           weighted by the matching-sign entries of the neighbor's row.
 *
 * Parameters:
 *   A                - fine-grid operator (ParCSR)
 *   CF_marker        - per-row C/F flags (>= 0: C-point, < 0: F-point;
 *                      -3 marks special F-points, reset to -1 on exit)
 *   S                - strength-of-connection matrix (same partitioning as A)
 *   num_cpts_global  - global coarse-point partition info
 *   num_functions    - number of functions (unknowns per node); when > 1,
 *                      dof_func restricts diagonal lumping to same-function
 *                      couplings
 *   dof_func         - function index per fine row (used iff num_functions>1)
 *   debug_flag       - when 4, print timing instrumentation
 *   trunc_factor     - drop tolerance for P truncation (0.0 disables)
 *   max_elmts        - max elements per row of P (0 disables)
 *   col_offd_S_to_A  - optional map from S's offd columns to A's offd
 *                      columns (NULL if they coincide)
 *   P_ptr            - output: the assembled interpolation matrix
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix   *A,
                              HYPRE_Int            *CF_marker,
                              hypre_ParCSRMatrix   *S,
                              HYPRE_BigInt         *num_cpts_global,
                              HYPRE_Int             num_functions,
                              HYPRE_Int            *dof_func,
                              HYPRE_Int             debug_flag,
                              HYPRE_Real            trunc_factor,
                              HYPRE_Int             max_elmts,
                              HYPRE_Int            *col_offd_S_to_A,
                              hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   /* Local (diag) and off-process (offd) blocks of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   /* Strength matrix blocks (values not needed, only the pattern) */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   HYPRE_Int          *tmp_map_offd = NULL;

   HYPRE_Int       *CF_marker_offd = NULL;  /* CF flags of ghost columns */
   HYPRE_Int       *dof_func_offd = NULL;   /* dof_func of ghost columns */

   /* Ghost rows of A: rows owned by neighbor ranks that appear as offd
      columns; needed to distribute off-process F-neighbor connections. */
   hypre_CSRMatrix *A_ext;
   HYPRE_Real      *A_ext_data = NULL;
   HYPRE_Int       *A_ext_i = NULL;
   HYPRE_BigInt    *A_ext_j = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;

   HYPRE_Int        P_diag_size, P_offd_size;

   /* P_marker[c] holds the position in P_diag of column c for the current
      row (>= jj_begin_row), or -1; analogously for P_marker_offd. */
   HYPRE_Int       *P_marker, *P_marker_offd;

   HYPRE_Int        jj_counter, jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;   /* per-thread nnz counters */
   HYPRE_Int        jj_begin_row, jj_begin_row_offd;
   HYPRE_Int        jj_end_row, jj_end_row_offd;

   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int       *fine_to_coarse;   /* fine row -> local coarse index */
   //HYPRE_Int *fine_to_coarse_offd;
   HYPRE_Int       *coarse_counter;   /* per-thread C-point counters */
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int        num_cols_P_offd;

   HYPRE_Int        i, i1, i2;
   HYPRE_Int        j, jl, jj, jj1;
   HYPRE_Int        kc;
   HYPRE_BigInt     big_k;
   HYPRE_Int        start;
   HYPRE_Int        sgn;
   HYPRE_Int        c_num;

   HYPRE_Real       diagonal;
   HYPRE_Real       sum;
   HYPRE_Real       distribute;

   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one = 1.0;

   HYPRE_Int        my_id;
   HYPRE_Int        num_procs;
   HYPRE_Int        num_threads;
   HYPRE_Int        num_sends;
   HYPRE_Int        index;
   HYPRE_Int        ns, ne, size, rest;  /* per-thread row range bookkeeping */
   HYPRE_Int       *int_buf_data;

   HYPRE_BigInt     col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_n = col_1 + local_numrows; /* one past last owned global column */

   HYPRE_Real       wall_time;  /* for debugging instrumentation  */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   /* The last rank holds the global coarse-point total; broadcast it. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      /* Lazily create A's communication package if it doesn't exist yet. */
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   /* Pack local CF_marker values for each send target, then exchange. */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (num_functions > 1)
   {
      /* Same exchange for the function indices (systems case). */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      A_ext      = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }

   /* Compress A_ext in place, translating global column indices to local
    * ones: owned columns become (big_k - col_1) >= 0, known ghost columns
    * become the encoded negative value -kc-1, and columns unknown to this
    * rank are dropped.  When num_procs == 1, num_cols_A_offd is 0 and the
    * loop body never executes, so the NULL A_ext pointers are never read. */
   index = 0;
   for (i=0; i < num_cols_A_offd; i++)
   {
      for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
      {
         big_k = A_ext_j[j];
         if (big_k >= col_1 && big_k < col_n)
         {
            A_ext_j[index] = big_k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
            if (kc > -1)
            {
               A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;   /* temporarily stores the new row *end* */
   }
   /* Shift row ends down one slot to restore proper CSR row pointers. */
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i-1];

   if (num_procs > 1) A_ext_i[0] = 0;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d  Interp: Comm 2   Get A_ext =  %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
   /* Each thread j counts, for its contiguous row chunk [ns, ne), the
    * C-points and the P nonzeros; totals are prefix-summed afterwards. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* Split n_fine rows as evenly as possible: the first `rest`
       * threads get one extra row. */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate  arrays.
    *-----------------------------------------------------------------------*/

   /* Prefix-sum the per-thread counters so thread t's entries start at
    * jj_count[t-1] (and similarly for the other two counters). */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   /* Turn per-thread-local coarse indices into process-local ones by
    * adding each thread's coarse-point offset. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      /* Must replicate the exact chunking used in the first pass. */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                    my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/

   /* Second pass: fill P.  Each thread rebuilds its starting counters from
    * the prefix sums and processes the same row chunk as in pass 1. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/

         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;

            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }

            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd]  = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd]  = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
            }

            jj_end_row_offd = jj_counter_offd;

            diagonal = A_diag_data[A_diag_i[i]];

            /* Loop over ith row of A.  First, the diagonal part of A */

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/

               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }

               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and influences i,
                * distribute a_{i,i1} to C-points that strongly influence i.
                * Note: currently no distribution to the diagonal in this case.
                * (Unlike BuildInterp, ALL non-C neighbors land here: weak
                * fine connections are treated as strong.)
                *--------------------------------------------------------------*/

               else
               {
                  sum = zero;

                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   *-----------------------------------------------------------*/

                  /* Only entries of the opposite sign to the row's first
                   * (diagonal) entry participate in the distribution. */
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row &&
                         (sgn*A_diag_data[jj1]) < 0)
                     {
                        sum += A_diag_data[jj1];
                     }
                  }

                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd
                            && (sgn*A_offd_data[jj1]) < 0)
                        {
                           sum += A_offd_data[jj1];
                        }
                     }
                  }

                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;

                     /*-----------------------------------------------------------
                      * Loop over row of A for point i1 and do the distribution.
                      *-----------------------------------------------------------*/

                     /* Diagonal block part of row i1 */
                     for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (P_marker[i2] >= jj_begin_row
                            && (sgn*A_diag_data[jj1]) < 0)
                        {
                           P_diag_data[P_marker[i2]]
                              += distribute * A_diag_data[jj1];
                        }
                     }

                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (P_marker_offd[i2] >= jj_begin_row_offd
                               && (sgn*A_offd_data[jj1]) < 0)
                           {
                              P_offd_data[P_marker_offd[i2]]
                                 += distribute * A_offd_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     /* Nothing to distribute to: lump into the diagonal
                      * (same-function couplings only, in the systems case). */
                     if (num_functions == 1 || dof_func[i] == dof_func[i1])
                        diagonal += A_diag_data[jj];
                  }
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/

                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }

                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and influences i,
                   * distribute a_{i,i1} to C-points that strongly influence i.
                   * Note: currently no distribution to the diagonal in this case.
                   *-----------------------------------------------------------*/

                  else
                  {
                     sum = zero;

                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      *---------------------------------------------------------*/

                     /* find row number */
                     c_num = A_offd_j[jj];

                     sgn = 1;
                     if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                     {
                        /* A_ext_j was remapped above: i2 >= 0 means a local
                         * (diag) column, i2 < 0 encodes offd column -i2-1. */
                        i2 = (HYPRE_Int)A_ext_j[jj1];

                        if (i2 > -1)
                        {
                           /* in the diagonal block */
                           if (P_marker[i2] >= jj_begin_row
                               && (sgn*A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           /* in the off_diagonal block  */
                           if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                               && (sgn*A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                     }

                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;

                        /*---------------------------------------------------------
                         * Loop over row of A_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/

                        /* Diagonal block part of row i1 */
                        for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                        {
                           i2 = (HYPRE_Int)A_ext_j[jj1];

                           if (i2 > -1) /* in the diagonal block */
                           {
                              if (P_marker[i2] >= jj_begin_row
                                  && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 P_diag_data[P_marker[i2]]
                                    += distribute * A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              /* in the off_diagonal block  */
                              if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                                  && (sgn*A_ext_data[jj1]) < 0)
                                 P_offd_data[P_marker_offd[-i2-1]]
                                    += distribute * A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                           diagonal += A_offd_data[jj];
                     }
                  }
               }
            }

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            /* NOTE(review): unlike hypre_BoomerAMGBuildInterp there is no
             * guard against diagonal == 0.0 here; a zero (lumped) diagonal
             * would divide by zero -- presumably ruled out for the target
             * problem class; confirm against upstream hypre. */
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               P_diag_data[jj] /= -diagonal;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               P_offd_data[jj] /= -diagonal;
            }
         }

         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }

   /* Assemble the ParCSR matrix P from the filled CSR arrays. */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate the CSR arrays; refresh local views. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Compress the offd column space of P: find which of A's offd columns
    * are actually used, build the col map, and renumber P_offd_j. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* Restore special F-point marks for later phases. */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   /* Release all work arrays. */
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(A_ext);
   }

   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildDirInterpHost
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildDirInterpHost( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real diagonal;
HYPRE_Real sum_N_pos, sum_P_pos;
HYPRE_Real sum_N_neg, sum_P_neg;
HYPRE_Real alfa = 1.0;
HYPRE_Real beta = 1.0;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
HYPRE_Int *P_marker, *P_marker_offd;
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
sum_N_pos = 0;
sum_N_neg = 0;
sum_P_pos = 0;
sum_P_neg = 0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (num_functions == 1 || dof_func[i1] == dof_func[i])
{
if (A_diag_data[jj] > 0)
sum_N_pos += A_diag_data[jj];
else
sum_N_neg += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
if (A_diag_data[jj] > 0)
sum_P_pos += A_diag_data[jj];
else
sum_P_neg += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
{
if (A_offd_data[jj] > 0)
sum_N_pos += A_offd_data[jj];
else
sum_N_neg += A_offd_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
if (A_offd_data[jj] > 0)
sum_P_pos += A_offd_data[jj];
else
sum_P_neg += A_offd_data[jj];
}
}
}
if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
if (P_diag_data[jj]> 0)
P_diag_data[jj] *= -beta;
else
P_diag_data[jj] *= -alfa;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
if (P_offd_data[jj]> 0)
P_offd_data[jj] *= -beta;
else
P_offd_data[jj] *= -alfa;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
HYPRE_Int *P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Dispatch wrapper for direct interpolation: routes to the device (CUDA)
 * implementation when A's diagonal lives in device memory, otherwise to
 * the host implementation.  Returns the callee's error code. */
HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix *A,
                               HYPRE_Int *CF_marker,
                               hypre_ParCSRMatrix *S,
                               HYPRE_BigInt *num_cpts_global,
                               HYPRE_Int num_functions,
                               HYPRE_Int *dof_func,
                               HYPRE_Int debug_flag,
                               HYPRE_Real trunc_factor,
                               HYPRE_Int max_elmts,
                               HYPRE_Int *col_offd_S_to_A,
                               HYPRE_Int interp_type,
                               hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int ret = 0;

#if defined(HYPRE_USING_CUDA)
   /* Mark the region for NVTX profiling tools */
   hypre_NvtxPushRange("DirInterp");
   HYPRE_ExecutionPolicy exec =
      hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      ret = hypre_BoomerAMGBuildDirInterpDevice(A, CF_marker, S, num_cpts_global,
                                                num_functions, dof_func, debug_flag,
                                                trunc_factor, max_elmts,
                                                col_offd_S_to_A, interp_type, P_ptr);
   }
   else
#endif
   {
      /* Host path: note interp_type is not forwarded here */
      ret = hypre_BoomerAMGBuildDirInterpHost(A, CF_marker, S, num_cpts_global,
                                              num_functions, dof_func, debug_flag,
                                              trunc_factor, max_elmts,
                                              col_offd_S_to_A, P_ptr);
   }

#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPopRange();
#endif

   return ret;
}
/*------------------------------------------------
* Drop entries in interpolation matrix P
*
*------------------------------------------------*/
/* Drop small entries from the interpolation matrix P.  Thin wrapper over
 * hypre_ParCSRMatrixTruncate: rescaling of the surviving row entries is
 * enabled, and threshold dropping is measured against the infinity norm
 * of each row (norm type 0). */
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 HYPRE_Real trunc_factor,
                                 HYPRE_Int max_elmts)
{
   const HYPRE_Int do_rescale = 1;   /* rescale rows of P after dropping */
   const HYPRE_Int norm_kind  = 0;   /* infinity-norm threshold dropping */

   return hypre_ParCSRMatrixTruncate(P, trunc_factor, max_elmts,
                                     do_rescale, norm_kind);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpModUnk - this is a modified interpolation for the unknown approach.
* here we need to pass in a strength matrix built on the entire matrix.
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
HERE, we only want to distribut to points of the SAME function type
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0 )
{
sum += A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
}
else /* sum = 0 - only add to diag if the same function type */
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal. (only if the same function type)
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
AGAIN, we only want to distribut to points of the SAME function type
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
}
else /* sum = 0 */
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGTruncandBuild
*--------------------------------------------------------------------------*/
/* Truncate the interpolation operator P (drop entries below
 * trunc_factor * row-max and/or keep at most max_elmts entries per row),
 * then compress the off-diagonal part of P: off-diag columns no longer
 * referenced are removed, P_offd_j is renumbered into the compressed local
 * indexing, the global column map is rebuilt, and P's communication
 * package is recreated.  Returns hypre_error_flag. */
HYPRE_Int
hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix *P,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts)
{
hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
hypre_ParCSRCommPkg *commpkg_P = hypre_ParCSRMatrixCommPkg(P);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(P);
HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(P_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_offd);
HYPRE_BigInt *new_col_map_offd;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int P_offd_size=0, new_num_cols_offd;
HYPRE_Int *P_marker;
HYPRE_Int i;
HYPRE_Int index;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* truncation may reallocate the CSR arrays; refresh cached pointers */
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_size = P_offd_i[n_fine];
}
new_num_cols_offd = 0;
if (P_offd_size)
{
/* P_marker[c] != 0 iff off-diag column c is still referenced by P */
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
for (i=0; i < num_cols_offd; i++)
P_marker[i] = 0;
/* count the distinct off-diag columns that survive truncation */
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
new_num_cols_offd++;
P_marker[index] = 1;
}
}
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_num_cols_offd, HYPRE_MEMORY_HOST);
/* tmp_map_offd[k] = old local column index of the k-th surviving column */
index = 0;
for (i=0; i < new_num_cols_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
/* renumber P_offd_j from old local indices to compressed indices */
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
new_num_cols_offd);
}
/* Rebuild the global column map for the surviving columns via a second
 * scan of P_marker.  Note the loop body only executes when
 * new_num_cols_offd > 0, which implies P_marker and new_col_map_offd
 * were allocated in the block above. */
index = 0;
for (i = 0; i < new_num_cols_offd; i++)
{
while (P_marker[index] == 0) index++;
new_col_map_offd[i] = col_map_offd[index];
index++;
}
if (P_offd_size) hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (new_num_cols_offd)
{
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(col_map_offd, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_num_cols_offd;
}
/* the sparsity pattern changed, so rebuild P's communication package */
if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P);
hypre_MatvecCommPkgCreate(P);
return hypre_error_flag;
}
/* hypre_CreateC: build the weighted-Jacobi iteration matrix
 *    C = I - w * D^{-1} * A
 * as a ParCSR matrix with the same sparsity pattern and partitioning as A.
 * The diagonal entry of each row of A is assumed to be stored first in the
 * row (the usual hypre CSR convention).  If w == 0, each row is instead
 * scaled by the row's l1-norm (sum of |a_ij| over diag and offd parts),
 * giving C_ii = 1 - a_ii/||row||_1 and C_ij = -a_ij/||row||_1.
 * C borrows A's row/col starts (ownership flags cleared).  Caller owns C. */
hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix *A,
HYPRE_Real w)
{
   MPI_Comm         comm            = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag          = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data     = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i        = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j        = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd          = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data     = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i        = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j        = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt    *row_starts      = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt    *col_map_offd_A  = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int        num_rows        = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        num_cols_offd   = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt     global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);

   hypre_ParCSRMatrix *C;
   hypre_CSRMatrix    *C_diag;
   hypre_CSRMatrix    *C_offd;
   HYPRE_Real         *C_diag_data;
   HYPRE_Int          *C_diag_i;
   HYPRE_Int          *C_diag_j;
   HYPRE_Real         *C_offd_data;
   HYPRE_Int          *C_offd_i;
   HYPRE_Int          *C_offd_j;
   HYPRE_BigInt       *col_map_offd_C;

   HYPRE_Int  row, k, diag_pos;
   HYPRE_Real scale;     /* multiplier applied to the off-diagonal entries */
   HYPRE_Real row_norm;  /* l1-norm of the current row (w == 0 case only) */

   /* C is square with A's row partitioning for both rows and columns,
    * and shares A's sparsity: same diag/offd nnz counts */
   C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows, row_starts,
                                row_starts, num_cols_offd, A_diag_i[num_rows], A_offd_i[num_rows]);
   hypre_ParCSRMatrixInitialize(C);

   C_diag      = hypre_ParCSRMatrixDiag(C);
   C_offd      = hypre_ParCSRMatrixOffd(C);
   C_diag_i    = hypre_CSRMatrixI(C_diag);
   C_diag_j    = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd_i    = hypre_CSRMatrixI(C_offd);
   C_offd_j    = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);
   col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C);

   /* row/col starts belong to A; do not let C free them */
   hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
   hypre_ParCSRMatrixOwnsColStarts(C) = 0;

   for (k = 0; k < num_cols_offd; k++)
   {
      col_map_offd_C[k] = col_map_offd_A[k];
   }

   for (row = 0; row < num_rows; row++)
   {
      diag_pos = A_diag_i[row];   /* diagonal entry is first in the row */
      if (w != 0)
      {
         scale = -w / A_diag_data[diag_pos];
         C_diag_data[diag_pos] = 1.0 - w;
      }
      else
      {
         /* w == 0: scale by the l1-norm of the full (diag + offd) row */
         row_norm = fabs(A_diag_data[diag_pos]);
         for (k = diag_pos + 1; k < A_diag_i[row+1]; k++)
         {
            row_norm += fabs(A_diag_data[k]);
         }
         for (k = A_offd_i[row]; k < A_offd_i[row+1]; k++)
         {
            row_norm += fabs(A_offd_data[k]);
         }
         scale = -1 / row_norm;
         C_diag_data[diag_pos] = 1.0 - A_diag_data[diag_pos] / row_norm;
      }
      C_diag_j[diag_pos] = A_diag_j[diag_pos];
      C_diag_i[row] = diag_pos;
      C_offd_i[row] = A_offd_i[row];
      /* scaled off-diagonal entries, pattern copied from A */
      for (k = diag_pos + 1; k < A_diag_i[row+1]; k++)
      {
         C_diag_data[k] = A_diag_data[k] * scale;
         C_diag_j[k] = A_diag_j[k];
      }
      for (k = A_offd_i[row]; k < A_offd_i[row+1]; k++)
      {
         C_offd_data[k] = A_offd_data[k] * scale;
         C_offd_j[k] = A_offd_j[k];
      }
   }
   C_diag_i[num_rows] = A_diag_i[num_rows];
   C_offd_i[num_rows] = A_offd_i[num_rows];
   return C;
}
/* RL */
HYPRE_Int
hypre_BoomerAMGBuildInterpOnePnt( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
//HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
/* csr's */
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
/* arrays */
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int num_cols_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_BigInt *col_map_offd_P = NULL;
/* CF marker off-diag part */
HYPRE_Int *CF_marker_offd = NULL;
/* func type off-diag part */
HYPRE_Int *dof_func_offd = NULL;
/* nnz */
HYPRE_Int nnz_diag, nnz_offd, cnt_diag, cnt_offd;
HYPRE_Int *marker_diag, *marker_offd = NULL;
/* local size */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
/* number of C-pts */
HYPRE_Int n_cpts = 0;
/* fine to coarse mapping: diag part and offd part */
HYPRE_Int *fine_to_coarse;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_BigInt total_global_cpts, my_first_cpt;
HYPRE_Int my_id, num_procs;
HYPRE_Int num_sends;
HYPRE_Int *int_buf_data = NULL;
HYPRE_BigInt *big_int_buf_data = NULL;
//HYPRE_Int col_start = hypre_ParCSRMatrixFirstRowIndex(A);
//HYPRE_Int col_end = col_start + n_fine;
HYPRE_Int i, j, i1, j1, k1, index, start;
HYPRE_Int *max_abs_cij;
char *max_abs_diag_offd;
HYPRE_Real max_abs_aij, vv;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
/* CF marker for the off-diag columns */
if (num_cols_A_offd)
{
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
}
/* function type indicator for the off-diag columns */
if (num_functions > 1 && num_cols_A_offd)
{
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
}
/* if CommPkg of A is not present, create it */
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* number of sends to do (number of procs) */
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
/* send buffer, of size send_map_starts[num_sends]),
* i.e., number of entries to send */
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
/* copy CF markers of elements to send to buffer
* RL: why copy them with two for loops? Why not just loop through all in one */
index = 0;
for (i = 0; i < num_sends; i++)
{
/* start pos of elements sent to send_proc[i] */
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
/* loop through all elems to send_proc[i] */
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
/* CF marker of send_map_elemts[j] */
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
/* create a handle to start communication. 11: for integer */
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd);
/* destroy the handle to finish communication */
hypre_ParCSRCommHandleDestroy(comm_handle);
/* do a similar communication for dof_func */
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
hypre_TFree(int_buf_data,HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping,
* and find the most strongly influencing C-pt for each F-pt
*-----------------------------------------------------------------------*/
/* nnz in diag and offd parts */
cnt_diag = 0;
cnt_offd = 0;
max_abs_cij = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
max_abs_diag_offd = hypre_CTAlloc(char, n_fine,HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
/* markers initialized as zeros */
marker_diag = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
//fine_to_coarse[i] = my_first_cpt + n_cpts;
fine_to_coarse[i] = n_cpts;
n_cpts++;
continue;
}
/* mark all the strong connections: in S */
HYPRE_Int MARK = i + 1;
/* loop through row i of S, diag part */
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
marker_diag[S_diag_j[j]] = MARK;
}
/* loop through row i of S, offd part */
if (num_procs > 1)
{
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
j1 = col_offd_S_to_A ? col_offd_S_to_A[S_offd_j[j]] : S_offd_j[j];
marker_offd[j1] = MARK;
}
}
fine_to_coarse[i] = -1;
/*---------------------------------------------------------------------------
* If i is an F-pt, interpolation is from the most strongly influencing C-pt
* Find this C-pt and save it
*--------------------------------------------------------------------------*/
/* if we failed to find any strong C-pt, mark this point as an 'n' */
char marker = 'n';
/* max abs val */
max_abs_aij = -1.0;
/* loop through row i of A, diag part */
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
i1 = A_diag_j[j];
vv = fabs(A_diag_data[j]);
#if 0
/* !!! this is a hack just for code verification purpose !!!
it basically says:
1. if we see |a_ij| < 1e-14, force it to be 1e-14
2. if we see |a_ij| == the max(|a_ij|) so far exactly,
replace it if the j idx is smaller
Reasons:
1. numerical round-off for eps-level values
2. entries in CSR rows may be listed in different orders
*/
vv = vv < 1e-14 ? 1e-14 : vv;
if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK &&
vv == max_abs_aij && i1 < max_abs_cij[i])
{
/* mark it as a 'd' */
marker = 'd';
max_abs_cij[i] = i1;
max_abs_aij = vv;
continue;
}
#endif
/* it is a strong C-pt and has abs val larger than what have seen */
if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv > max_abs_aij)
{
/* mark it as a 'd' */
marker = 'd';
max_abs_cij[i] = i1;
max_abs_aij = vv;
}
}
/* offd part */
if (num_procs > 1)
{
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
i1 = A_offd_j[j];
vv = fabs(A_offd_data[j]);
if (CF_marker_offd[i1] >= 0 && marker_offd[i1] == MARK && vv > max_abs_aij)
{
/* mark it as an 'o' */
marker = 'o';
max_abs_cij[i] = i1;
max_abs_aij = vv;
}
}
}
max_abs_diag_offd[i] = marker;
if (marker == 'd')
{
cnt_diag ++;
}
else if (marker == 'o')
{
cnt_offd ++;
}
}
nnz_diag = cnt_diag + n_cpts;
nnz_offd = cnt_offd;
/*------------- allocate arrays */
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag,HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, nnz_diag,HYPRE_MEMORY_HOST);
/* not in ``if num_procs > 1'',
* allocation needed even for empty CSR */
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd,HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, nnz_offd,HYPRE_MEMORY_HOST);
/* redundant */
P_diag_i[0] = 0;
P_offd_i[0] = 0;
/* reset counters */
cnt_diag = 0;
cnt_offd = 0;
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd,HYPRE_MEMORY_HOST);
big_int_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
big_int_buf_data[index++] = my_first_cpt
+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_int_buf_data, fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/*-----------------------------------------------------------------------
* Second Pass: Populate P
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] >= 0)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity.
*--------------------------------------------------------------------*/
//P_diag_j[cnt_diag] = fine_to_coarse[i] - my_first_cpt;
P_diag_j[cnt_diag] = fine_to_coarse[i];
P_diag_data[cnt_diag++] = 1.0;
}
else
{
/*---------------------------------------------------------------------------
* If i is an F-pt, interpolation is from the most strongly influencing C-pt
*--------------------------------------------------------------------------*/
if (max_abs_diag_offd[i] == 'd')
{
/* on diag part of P */
j = max_abs_cij[i];
//P_diag_j[cnt_diag] = fine_to_coarse[j] - my_first_cpt;
P_diag_j[cnt_diag] = fine_to_coarse[j];
P_diag_data[cnt_diag++] = 1.0;
}
else if (max_abs_diag_offd[i] == 'o')
{
/* on offd part of P */
j = max_abs_cij[i];
P_offd_j[cnt_offd] = j;
P_offd_data[cnt_offd++] = 1.0;
}
}
P_diag_i[i+1] = cnt_diag;
P_offd_i[i+1] = cnt_offd;
}
hypre_assert(cnt_diag == nnz_diag);
hypre_assert(cnt_offd == nnz_offd);
/* num of cols in the offd part of P */
num_cols_offd_P = 0;
/* marker_offd: all -1 */
for (i = 0; i < num_cols_A_offd; i++)
{
marker_offd[i] = -1;
}
for (i = 0; i < nnz_offd; i++)
{
i1 = P_offd_j[i];
if (marker_offd[i1] == -1)
{
num_cols_offd_P++;
marker_offd[i1] = 1;
}
}
/* col_map_offd_P: the col indices of the offd of P
* we first keep them be the offd-idx of A */
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P,HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P,HYPRE_MEMORY_HOST);
for (i = 0, i1 = 0; i < num_cols_A_offd; i++)
{
if (marker_offd[i] == 1)
{
tmp_map_offd[i1++] = i;
}
}
hypre_assert(i1 == num_cols_offd_P);
/* now, adjust P_offd_j to local idx w.r.t col_map_offd_R
* by searching */
for (i = 0; i < nnz_offd; i++)
{
i1 = P_offd_j[i];
k1 = hypre_BinarySearch(tmp_map_offd, i1, num_cols_offd_P);
/* search must succeed */
hypre_assert(k1 >= 0 && k1 < num_cols_offd_P);
P_offd_j[i] = k1;
}
/* change col_map_offd_P to global coarse ids */
for (i = 0; i < num_cols_offd_P; i++)
{
col_map_offd_P[i] = fine_to_coarse_offd[tmp_map_offd[i]];
}
/* Now, we should have everything of Parcsr matrix P */
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumCols(A), /* global num of rows */
total_global_cpts, /* global num of cols */
hypre_ParCSRMatrixColStarts(A), /* row_starts */
num_cpts_global, /* col_starts */
num_cols_offd_P, /* num cols offd */
nnz_diag,
nnz_offd);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
/* P does not own ColStarts, since A does */
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
/* create CommPkg of P */
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
/* free workspace */
hypre_TFree(CF_marker_offd,HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd,HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd,HYPRE_MEMORY_HOST);
hypre_TFree(big_int_buf_data,HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse,HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offd,HYPRE_MEMORY_HOST);
hypre_TFree(marker_diag,HYPRE_MEMORY_HOST);
hypre_TFree(marker_offd,HYPRE_MEMORY_HOST);
hypre_TFree(max_abs_cij,HYPRE_MEMORY_HOST);
hypre_TFree(max_abs_diag_offd,HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
|
SpatialAdaptiveAveragePooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialAdaptiveAveragePooling.c"
#else
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
/* Forward pass over one (possibly batched-offset) frame: for every output
 * cell, average the input values inside its adaptive pooling window.
 * Window bounds come from START_IND/END_IND; input is addressed through
 * explicit strides, so non-contiguous input planes are supported. */
static void THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          int64_t nslices,
          int64_t iwidth,
          int64_t iheight,
          int64_t owidth,
          int64_t oheight,
          int64_t stridew,
          int64_t strideh,
          int64_t strided)
{
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    /* strided view of this input plane / dense view of this output plane */
    real *plane_in  = input_p + k*strided;
    real *plane_out = output_p + k*owidth*oheight;

    int64_t oy, ox;
    for (oy = 0; oy < oheight; oy++)
    {
      const int y0 = START_IND(oy, oheight, iheight);
      const int y1 = END_IND(oy, oheight, iheight);
      const int pool_h = y1 - y0;

      for (ox = 0; ox < owidth; ox++)
      {
        const int x0 = START_IND(ox, owidth, iwidth);
        const int x1 = END_IND(ox, owidth, iwidth);
        const int pool_w = x1 - x0;

        /* accumulate the window, then write its mean */
        real acc = 0;
        int dy, dx;
        for (dy = 0; dy < pool_h; dy++)
        {
          for (dx = 0; dx < pool_w; dx++)
          {
            acc += plane_in[(y0 + dy)*strideh + (x0 + dx)*stridew];
          }
        }
        plane_out[oy*owidth + ox] = acc / pool_w / pool_h;
      }
    }
  }
}
/* Adaptive average pooling forward: resizes `output` to
 * [nbatch x] nslices x oheight x owidth and fills it by averaging each
 * adaptive window of `input`. Accepts a 3D (C,H,W) or 4D (N,C,H,W) tensor;
 * input may be non-contiguous (handled via explicit strides). */
void THNN_(SpatialAdaptiveAveragePooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          int owidth,
          int oheight)
{
  THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
                "3D or 4D (batch mode) tensor expected for input, but got: %s");

  const int batched = (input->nDimension == 4);
  const int dimh = batched ? 2 : 1;
  const int dimw = batched ? 3 : 2;

  int64_t nbatch    = batched ? input->size[0]   : 1;
  int64_t istride_b = batched ? input->stride[0] : 0;  /* unused when 3D */

  /* plane count and spatial extents */
  int64_t nslices = input->size[dimh-1];
  int64_t iheight = input->size[dimh];
  int64_t iwidth  = input->size[dimw];

  /* element strides of the input */
  int64_t istride_d = input->stride[dimh-1];
  int64_t istride_h = input->stride[dimh];
  int64_t istride_w = input->stride[dimw];

  if (!batched)
  {
    THTensor_(resize3d)(output, nslices, oheight, owidth);
    real *input_data  = THTensor_(data)(input);
    real *output_data = THTensor_(data)(output);
    THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data, output_data,
                                                            nslices,
                                                            iwidth, iheight,
                                                            owidth, oheight,
                                                            istride_w, istride_h,
                                                            istride_d);
  }
  else
  {
    THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
    real *input_data  = THTensor_(data)(input);
    real *output_data = THTensor_(data)(output);

    int64_t p;
#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++)
    {
      /* input batch step uses its stride; output was freshly resized, so
         its batch step is the dense plane size */
      THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data + p*istride_b,
                                                              output_data + p*nslices*owidth*oheight,
                                                              nslices,
                                                              iwidth, iheight,
                                                              owidth, oheight,
                                                              istride_w, istride_h,
                                                              istride_d);
    }
  }
}
/* Backward pass over one frame: each output gradient is distributed evenly
 * across the input cells of its adaptive pooling window. Both gradient
 * buffers are dense (the caller passes contiguous tensors).
 * Fix: the per-element contribution (gradOutput / kW / kH) is constant over
 * a window, so it is computed once per window instead of once per input
 * cell — same operands, identical floating-point result, less work. */
static void THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(
          real *gradInput_p,
          real *gradOutput_p,
          int64_t nslices,
          int64_t iwidth,
          int64_t iheight,
          int64_t owidth,
          int64_t oheight)
{
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    real *gradInput_p_k = gradInput_p + k*iwidth*iheight;
    real *gradOutput_p_k = gradOutput_p + k*owidth*oheight;

    /* calculate average */
    int64_t i, j;
    for (i = 0; i < oheight; i++)
    {
      int y_start = START_IND(i, oheight, iheight);
      int y_end   = END_IND(i, oheight, iheight);
      int kH = y_end - y_start;

      for (j = 0; j < owidth; j++)
      {
        int x_start = START_IND(j, owidth, iwidth);
        int x_end   = END_IND(j, owidth, iwidth);
        int kW = x_end - x_start;

        /* hoisted loop-invariant: even share of this output gradient */
        real grad_delta = gradOutput_p_k[i*owidth + j] / kW / kH;

        int x, y;
        for (y = y_start; y < y_end; y++)
        {
          for (x = x_start; x < x_end; x++)
          {
            /* update gradient */
            gradInput_p_k[y*iwidth + x] += grad_delta;
          }
        }
      }
    }
  }
}
/* Adaptive average pooling backward: resizes gradInput to match input,
 * zeroes it, and accumulates the evenly-distributed output gradients.
 * Accepts 3D (C,H,W) or 4D (N,C,H,W) tensors.
 * Fix: size/extent locals widened from int to int64_t, matching
 * updateOutput and the int64_t parameters of the _frame helper, so sizes
 * and plane offsets of very large tensors are not truncated. */
void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput)
{
  int dimw = 2;
  int dimh = 1;
  int64_t nbatch = 1;
  int64_t nslices;   /* was int: truncated large dims before the int64_t call */
  int64_t iheight;
  int64_t iwidth;
  int64_t oheight;
  int64_t owidth;
  real *gradInput_data;
  real *gradOutput_data;

  /* get contiguous gradOutput (frame helper assumes dense layout) */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* gradInput matches input's shape and starts at zero (we accumulate) */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->nDimension == 4) {
    nbatch = input->size[0];
    dimw++;
    dimh++;
  }

  /* sizes */
  nslices = input->size[dimh-1];
  iheight = input->size[dimh];
  iwidth  = input->size[dimw];
  oheight = gradOutput->size[dimh];
  owidth  = gradOutput->size[dimw];

  /* get raw pointers */
  gradInput_data  = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);

  /* backprop */
  if (input->nDimension == 3)
  {
    THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                               nslices,
                                                               iwidth, iheight,
                                                               owidth, oheight);
  }
  else
  {
    int64_t p;
#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data + p*nslices*iwidth*iheight,
                                                                 gradOutput_data + p*nslices*owidth*oheight,
                                                                 nslices,
                                                                 iwidth, iheight,
                                                                 owidth, oheight);
    }
  }

  /* cleanup: release the contiguous copy made above */
  THTensor_(free)(gradOutput);
}
#endif
#undef START_IND
#undef END_IND |
conv5x5s1_pack4_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv5x5s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* kptr = (const float*)kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j+3<outw; j+=4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"// sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1] \n"// r04 r05 r06 r07
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r10 r11 r12 r13
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2] \n"// r14 r15 r16 r17
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3] \n"// r24 r25 r26 r27
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n"// r30 r31 r32 r33
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4] \n"// r34 r35 r36 r37
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"// r40 r41 r42 r43
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%5] \n"// r44 r45 r46 r47
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
// "prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
// ARMv7 NEON path of the 4-wide output step: q12-q15 accumulate four
// output pixels (4 packed channels each, preloaded from %0/outptr0).
// %1-%5 walk five input rows r0..r4; %6 (kptr) streams weight-tap tiles
// of {d16-d23}, where q8..q11 form a 4x4 weight tile (one q register per
// input channel) and the d-register lane selects the input channel.
// 24 tap tiles are loaded with post-increment plus one final tile without,
// and kptr is restored at the end by "sub %6, %6, #1536" (24 * 16 floats),
// so kptr is net-unchanged across the asm block.
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"// sum0 sum1 sum2 sum3
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"// r00 r01 r02 r03
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #512] \n"
"vldm %1, {d8-d15} \n"// r04 r05 r06 r07
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
// ---- input row r1: r0's last tap overlaps the r1 pixel load ----
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"// r10 r11 r12 r13
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #512] \n"
"vldm %2, {d8-d15} \n"// r14 r15 r16 r17
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
// ---- input row r2 ----
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n"// r20 r21 r22 r23
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #512] \n"
"vldm %3, {d8-d15} \n"// r24 r25 r26 r27
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
// ---- input row r3 ----
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #512] \n"
"vldm %4!, {d0-d7} \n"// r30 r31 r32 r33
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #512] \n"
"vldm %4, {d8-d15} \n"// r34 r35 r36 r37
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
// ---- input row r4 ----
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"// r40 r41 r42 r43
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #512] \n"
"vldm %5, {d8-d15} \n"// r44 r45 r46 r47
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
// final weight tap: loaded without post-increment so the "sub" below
// restores kptr exactly
// "pld [%6, #512] \n"
"vldm %6, {d16-d23} \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
// 2-wide output step: each iteration produces two output pixels (sum0,
// sum1), reading the same five input rows (%1-%5 = r0..r4) and streaming
// the weight taps from %6 (kptr).  kptr is restored at the end of each
// asm block ("sub %6, %6, #1536", i.e. 24 * 16 floats), so the packed
// weights are reused for the next output position.
for (; j+1<outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v20.4s, v21.4s}, [%0] \n"// sum0 sum1
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"// r00 r01
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
// v22/v23 start fresh partial sums (fmul, not fmla); they are merged
// into v20/v21 by the fadd pair just before the store
"fmul v22.4s, v16.4s, v0.s[0] \n"
"fmul v23.4s, v16.4s, v1.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%1] \n"// r02 r03 r04 r05
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
// ---- input row r1 ----
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n"// r10 r11
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%2] \n"// r12 r13 r14 r15
"fmla v22.4s, v16.4s, v1.s[0] \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
// ---- input row r2 ----
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"// r20 r21
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%3] \n"// r22 r23 r24 r25
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
// ---- input row r3 ----
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4s, v1.4s}, [%4], #32 \n"// r30 r31
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%4] \n"// r32 r33 r34 r35
"fmla v22.4s, v16.4s, v1.s[0] \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
// ---- input row r4 ----
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5], #32 \n"// r40 r41
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%5] \n"// r42 r43 r44 r45
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
// last weight tap loaded without post-increment so the "sub" below
// restores kptr exactly
// "prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
// merge the two accumulator pairs into the final sums
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
// ARMv7 variant of the same 2-wide step: q12/q13 hold the loaded sums,
// q14/q15 start fresh (vmul) and are merged by the vadd pair at the end
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n"// sum0 sum1
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"// r00 r01
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmul.f32 q14, q8, d0[0] \n"
"vmul.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #512] \n"
"vldm %1, {d4-d11} \n"// r02 r03 r04 r05
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
// ---- input row r1 ----
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"// r10 r11
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #512] \n"
"vldm %2, {d4-d11} \n"// r12 r13 r14 r15
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
// ---- input row r2 ----
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"// r20 r21
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #512] \n"
"vldm %3, {d4-d11} \n"// r22 r23 r24 r25
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
// ---- input row r3 ----
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #256] \n"
"vld1.f32 {d0-d3}, [%4 :128]! \n"// r30 r31
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #512] \n"
"vldm %4, {d4-d11} \n"// r32 r33 r34 r35
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
// ---- input row r4 ----
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n"// r40 r41
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #512] \n"
"vldm %5, {d4-d11} \n"// r42 r43 r44 r45
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
// last weight tap loaded without post-increment so the "sub" below
// restores kptr exactly
// "pld [%6, #512] \n"
"vldm %6, {d16-d23} \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
// merge the two accumulator pairs into the final sums
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j<outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"// sum0
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1], #16 \n"// r00
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%1] \n"// r01 r02 r03 r04
"fmul v21.4s, v16.4s, v0.s[0] \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"// r10
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%2] \n"// r11 r12 r13 r14
"fmla v21.4s, v24.4s, v0.s[0] \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n"// r20
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%3] \n"// r21 r22 r23 r24
"fmla v21.4s, v16.4s, v0.s[0] \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4s}, [%4], #16 \n"// r30
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%4] \n"// r31 r32 r33 r34
"fmla v21.4s, v24.4s, v0.s[0] \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"// r40
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%5] \n"// r41 r42 r43 r44
"fmla v21.4s, v16.4s, v0.s[0] \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
// "prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"st1 {v20.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d24-d25}, [%0 :128] \n"// sum0
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128]! \n"// r00
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmul.f32 q13, q8, d0[0] \n"
"vmul.f32 q14, q9, d0[1] \n"
"vmul.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #512] \n"
"vldm %1, {d2-d9} \n"// r01 r02 r03 r04
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"// r10
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #512] \n"
"vldm %2, {d2-d9} \n"// r11 r12 r13 r14
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3 :128]! \n"// r20
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #512] \n"
"vldm %3, {d2-d9} \n"// r21 r22 r23 r24
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #128] \n"
"vld1.f32 {d0-d1}, [%4 :128]! \n"// r30
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #512] \n"
"vldm %4, {d2-d9} \n"// r31 r32 r33 r34
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"// r40
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #512] \n"
"vldm %5, {d2-d9} \n"// r41 r42 r43 r44
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
// "pld [%6, #512] \n"
"vldm %6, {d16-d23} \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vadd.f32 q13, q13, q14 \n"
"vadd.f32 q12, q12, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"vst1.f32 {d24-d25}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
r0 += 4*4;
r1 += 4*4;
r2 += 4*4;
r3 += 4*4;
r4 += 4*4;
}
}
}
}
}
|
GB_unaryop__abs_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_fp64
// op(A') function: GB_tran__abs_fp64_fp64
// C type: double
// A type: double
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_fp64_fp64
(
    double *Cx,         // output array; Cx and Ax may be aliased (in-place safe)
    double *Ax,         // input array with anz entries
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic method
    return (GrB_NO_VALUE) ;
    #else
    // Apply Cx [k] = fabs ((double) Ax [k]) entrywise, in parallel.
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // the generated macro expands to: Cx [k] = fabs ((double) Ax [k])
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply cij = fabs (aij) to each entry.
// The numeric work is done by the shared transpose template below, which is
// specialized for this operator by the GB_* macros defined earlier in the file.
GrB_Info GB_tran__abs_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
// per-task row counts and slicing info — produced by the symbolic phase;
// consumed inside GB_unaryop_transpose.c
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
// numeric (phase 2) pass: fill C with op(cast(aij)) while transposing
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
vanilla_sum_prod3.c | #include "q_incs.h"
#include "vanilla_sum_prod3.h"
extern uint64_t num_ops;
// Elementwise product Z[i] = X[i] * Y[i] for i in [0, n).
// X holds single-precision values; each product is formed in double
// precision (X[i] is promoted before the multiply).
static inline void
_vvmul(
    float * restrict X,
    double * restrict Y,
    uint32_t n,
    double * restrict Z
    )
{
    uint32_t idx;
    #pragma omp simd
    for ( idx = 0; idx < n; idx++ ) {
        Z[idx] = X[idx] * Y[idx];
    }
    // num_ops += n;
}
// Reduce the n entries of X to their sum, stored into *ptr_y.
// The SIMD reduction clause lets the compiler reassociate the additions.
static inline void
_sum(
    double * restrict X,
    uint32_t n,
    double *ptr_y
    )
{
    double acc = 0;
    #pragma omp simd reduction(+:acc)
    for ( uint32_t j = 0; j < n; j++ ) {
        acc += X[j];
    }
    // num_ops += n;
    *ptr_y = acc;
}
/**
 * Computes the upper triangle of A, where
 *     A[i][j] = sum_k X[i][k] * w[k] * X[j][k]    for j >= i,
 * and the strict lower triangle is left at zero.
 *
 * BUG FIX: the original version allocated two scratch vectors (temp1,
 * temp2) once and shared them across every thread of the parallel loop,
 * a data race that silently corrupts A when run with more than one
 * thread. All intermediate state is now thread-private scalars, so no
 * shared scratch (and no heap allocation) is needed. The weighted
 * product X[i][k]*w[k] is recomputed per j instead of being cached per
 * i — a constant-factor cost accepted for correctness.
 *
 * X: M vectors of length N (single precision input)
 * w: weight vector of length N
 * A: output, M vectors of length M (caller-allocated)
 * Returns 0 on success (no failure paths remain).
 */
int
vanilla_sum_prod3(
    float **X, /* M vectors of length N */
    uint64_t M,
    uint64_t N,
    double *w, /* vector of length N */
    double **A /* M vectors of length M */
    )
{
  int status = 0;
  uint32_t nT = sysconf(_SC_NPROCESSORS_ONLN);
  // nT = 4; // TODO FIX HARD CODING

  // Zero the full output matrix; rows are independent.
  #pragma omp parallel for schedule(static) num_threads(nT)
  for ( uint64_t i = 0; i < M; i++ ) {
    memset(A[i], '\0', M*sizeof(double));
  }

  // Row i of the upper triangle per iteration; every variable written
  // below is private to the executing thread — no data race.
  #pragma omp parallel for schedule(static, 1) num_threads(nT)
  for ( uint64_t i = 0; i < M; i++ ) {
    float *Xi = X[i];
    double *Ai = A[i];
    for ( uint64_t j = i; j < M; j++ ) {
      float *Xj = X[j];
      double sum = 0;
      #pragma omp simd reduction(+:sum)
      for ( uint64_t k = 0; k < N; k++ ) {
        // same operation order as the original _vvmul/_vvmul/_sum chain
        sum += ((double)Xi[k] * w[k]) * Xj[k];
      }
      Ai[j] += sum;
    }
  }
  return status;
}
|
cleaned_t_b_one.c | /**
F.H.P.C. Assignment 2
@file cleaned_t_b_one.cc
@brief The master thread fills the array; all threads then compute the sum.
@author Pietro Morichetti
@date 17/12/2019
@version 1.1
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#define _GNU_SOURCE
#define N 10000000 // size of the problem
int main(int argc, char **argv){
  long int S = 0;
  int *array = (int*)malloc(N * sizeof(int));
  if(array == NULL){ // BUG FIX: the original dereferenced a NULL array on OOM
    fprintf(stderr, "allocation of %d ints failed\n", N);
    return 1;
  }
#if defined(_OPENMP)
  if(argc > 1){
    omp_set_num_threads(atoi(*(argv + 1))); // set the number of threads
  }
#endif
  for(int ii = 0; ii < N; ++ii){ // thread 0 fills the array serially
    array[ii] = ii;
  }
#pragma omp parallel for reduction(+:S) // everyone adding up and reduct on S
  for(int ii = 0; ii < N; ++ii){
    S += array[ii];
  }
  free(array);
  return 0;
}
|
house.h | // This is AutoMine/GraphZero implementation
// Counts embeddings of a 5-vertex pattern by nested set operations over
// neighbor lists. NOTE(review): the filename suggests the "house" pattern
// (a triangle sharing an edge with a 4-cycle) — confirm against the
// AutoMine/GraphZero pattern spec.
// Naming convention: yK = N(vK); a name like y0n1 is "in N(v0) and not in
// N(v1)"; counter accumulates matches via the OpenMP reduction.
#pragma omp parallel for schedule(dynamic,1) reduction(+:counter)
for(vidType v0 = 0; v0 < g.V(); v0++) {
auto y0 = g.N(v0);
// presumably restricts to neighbors with id < v0 for symmetry breaking
// — TODO confirm the semantics of bounded()
auto y0f0 = bounded(y0,v0);
for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) {
auto v1 = y0f0.begin()[idx1];
auto y1 = g.N(v1);
// common neighbors of v0 and v1 (triangle candidates v2)
auto y0y1 = intersection_set(y0, y1);
// neighbors of v1 that are not neighbors of v0
VertexSet n0y1; difference_set(n0y1,y1, y0);
// neighbors of v0 that are not neighbors of v1
auto y0n1 = difference_set(y0, y1);
for(vidType idx2 = 0; idx2 < y0y1.size(); idx2++) {
auto v2 = y0y1.begin()[idx2];
auto y2 = g.N(v2);
// exclude v2's neighborhood so v3/v4 stay outside the triangle
auto n0y1n2 = difference_set(n0y1, y2);
auto y0n1n2 = difference_set(y0n1, y2);
for(vidType idx3 = 0; idx3 < n0y1n2.size(); idx3++) {
auto v3 = n0y1n2.begin()[idx3];
auto y3 = g.N(v3);
// v4 must be adjacent to v3 and lie in y0n1n2
counter += intersection_num(y0n1n2, y3);
}
}
}
}
|
07_global_loop_reduction.c | #include <stdio.h>
#include <omp.h>
#define MAX_ITS 10000
int main(){
    // Each thread increments a private copy of the counter; the
    // reduction(+) clause combines the per-thread copies into count
    // when the parallel region ends, so the total equals MAX_ITS.
    int count = 0;
    int i;
    #pragma omp parallel for reduction(+:count)
    for (i = 0; i < MAX_ITS; ++i){
        count += 1;
    }
    printf("Counter records %i iterations\n", count);
    return 0;
}
|
triad-cpu.h | #pragma once
#include <omp.h>
#include <stdlib.h> // aligned_alloc / free for the C11 _mm_malloc fallback below
//#include <immintrin.h>
#ifdef __INTEL_COMPILER
#define DECLARE_ALIGNED(p, a) __assume_aligned(p, a)
#elif defined __GNUC__
#define DECLARE_ALIGNED(p, a) p = __builtin_assume_aligned(p, a)
#else
// Ignore if we're using an unsupported compiler
#define DECLARE_ALIGNED(p, a)
#endif
#ifndef __INTEL_COMPILER
// BUG FIX 1: the original tested __STDC_VERSION__ == 201112L, so compiling
// as C17/C23 silently dropped this fallback and the calls failed to link.
#if __STDC_VERSION__ >= 201112L
// C11 fallback for the Intel _mm_malloc/_mm_free pair.
// BUG FIX 2: aligned_alloc requires the size to be a multiple of the
// alignment (otherwise undefined/NULL in C11), while _mm_malloc accepts
// arbitrary sizes — callers here pass sizeof(double)*n — so round up.
void * _mm_malloc(size_t s, size_t n) { return aligned_alloc(n, (s + n - 1) / n * n); }
void _mm_free(void* p) { free(p); }
#endif
#endif
/**
 * Measures in-cache "triad" (a[i] += b[i] + scalar*c[i]) bandwidth.
 *
 * Each OpenMP thread allocates its own 64-byte-aligned working set of
 * three n-element double arrays, touches them once to warm the cache,
 * then times nreps triad sweeps. Per-thread bandwidth estimates (GB/s,
 * counting 4 double accesses per element per sweep) are summed across
 * threads via the reduction.
 *
 * Fixes vs. the original: allocation results are checked before use
 * (threads whose allocation fails contribute 0 instead of dereferencing
 * NULL), loop indices are size_t to match n/nreps (no signed/unsigned
 * mixing), and scalar uses a double literal instead of 2.0f.
 */
double cache_triad(size_t n, size_t nreps)
{
    const double scalar = 2.0;
    double tot_mem_bw = 0.0;
#pragma omp parallel reduction(+ : tot_mem_bw)
    {
        double* restrict a = (double*)_mm_malloc(sizeof(double) * n, 64);
        double* restrict b = (double*)_mm_malloc(sizeof(double) * n, 64);
        double* restrict c = (double*)_mm_malloc(sizeof(double) * n, 64);
        if (a != NULL && b != NULL && c != NULL) {
            DECLARE_ALIGNED(a, 64);
            DECLARE_ALIGNED(b, 64);
            DECLARE_ALIGNED(c, 64);
            // First touch: should place a, b, c in cache before timing.
            for (size_t i = 0; i < n; ++i) {
                a[i] = 0.0;
                b[i] = 3.0;
                c[i] = 2.0;
            }
            double t0 = omp_get_wtime();
            for (size_t t = 0; t < nreps; ++t) {
#pragma omp simd aligned(a : 64) aligned(b : 64) aligned(c : 64)
                for (size_t i = 0; i < n; ++i) {
                    a[i] += b[i] + scalar * c[i];
                }
            }
            double t1 = omp_get_wtime();
            // 4 accesses per element per sweep: load/store a, load b, load c.
            double mem_bw = (4.0 * sizeof(double) * n) / ((t1 - t0) / nreps) / 1e9;
            tot_mem_bw += mem_bw;
        }
        // _mm_free(NULL) is safe in the C11 fallback above (free(NULL) no-op)
        _mm_free(a);
        _mm_free(b);
        _mm_free(c);
    }
    return tot_mem_bw;
}
|
DRB001-antidep1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@64:10 vs. a[i]@64:5
*/
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i;
int len = 1000;
int a[1000];
// Initialization loop: each iteration writes a distinct a[i] — no race here.
#pragma omp parallel for private(i)
for (i=0; i<len; i++)
a[i]= i;
// Loop-carried anti-dependence: a[i] reads a[i+1] before iteration i+1
// overwrites it. Run serially (as written here), a[i] becomes i+2, so the
// program prints a[500]=502.
// NOTE(review): this is a DataRaceBench kernel whose header documents a
// race pair a[i+1] vs a[i]; upstream DRB001 puts the omp pragma on THIS
// loop to exhibit that race, but in this listing only the init loop is
// parallel — confirm against upstream before treating this as the
// intended "yes race" variant. Do not "fix" the dependence: it is the
// benchmark's subject.
for (i=0;i< len -1 ;i++)
a[i]=a[i+1]+1;
printf ("a[500]=%d\n", a[500] );
return 0;
}
|
GB_binop__le_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__le_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__le_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__le_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_fp32)
// A*D function (colscale): GB (_AxD__le_fp32)
// D*A function (rowscale): GB (_DxB__le_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__le_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__le_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_fp32)
// C=scalar+B GB (_bind1st__le_fp32)
// C=scalar+B' GB (_bind1st_tran__le_fp32)
// C=A+scalar GB (_bind2nd__le_fp32)
// C=A'+scalar GB (_bind2nd_tran__le_fp32)
// C type: bool
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_FP32 || GxB_NO_LE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled: LE is not one of the accumulable ops listed above, so the code
// generator emitted no C+=A+B (dense ewise3 accum) kernel for this operator
// (hence the "(none)" name); the whole definition is compiled out.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A, and B all dense, where "+" is cij = (aij <= bij).
// The body is the shared template, specialized by the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (accumulate a sparse B into a dense C). The subassign template is
// compiled out (#if 0) for this operator, so this kernel is a stub that
// reports success without touching C. NOTE(review): presumably the caller
// never dispatches here for LE (bool accumulation of a comparison is handled
// generically) — confirm against the dispatch logic in GB_dense_subassign.
GrB_Info GB (_Cdense_accumB__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (accumulate a scalar into a dense C). As with _Cdense_accumB above,
// the template is compiled out (#if 0) for this operator, leaving a stub
// that reports success without modifying C.
GrB_Info GB (_Cdense_accumb__le_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with
// cij = (aij <= djj); C holds bool results. Body is the shared template.
GrB_Info GB (_AxD__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// expose C's value array with its concrete type for the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with
// cij = (dii <= bij); C holds bool results. Body is the shared template.
GrB_Info GB (_DxB__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// expose C's value array with its concrete type for the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (union of patterns), optionally masked (C<M> or C<!M>),
// with cij = (aij <= bij). For eWiseUnion, alpha/beta supply the values used
// where one operand's entry is missing.
GrB_Info GB (_AaddB__le_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here so the template's GB_FREE_WORKSPACE can free it
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
// unbox the fill-in scalars only when eWiseUnion semantics are requested
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (intersection of patterns), optionally
// masked, where C is sparse or hypersparse; cij = (aij <= bij).
GrB_Info GB (_AemultB_08__le_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full; cij = (aij <= bij). Since GB_BINOP_FLIP is 0 for this
// operator (flips are rewritten elsewhere, e.g. LE <-> GE), only the
// unflipped branch below is compiled.
GrB_Info GB (_AemultB_02__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; cij = (aij <= bij).
GrB_Info GB (_AemultB_04__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap,
// with cij = (aij <= bij).
GrB_Info GB (_AemultB_bitmap__le_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = (x <= Bx [p]) for every entry present in B, with the
// scalar x bound as the first operand of the LE operator.
GrB_Info GB (_bind1st__le_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays, and the bound scalar
float *Bx = (float *) Bx_input ;
bool *Cx = (bool *) Cx_output ;
const float x = (*((float *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// apply the operator only where the bitmap says B has an entry
if (GBB (Bb, p))
{
float bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = (Ax [p] <= y) for every entry present in A, with the
// scalar y bound as the second operand of the LE operator.
GrB_Info GB (_bind2nd__le_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays, and the bound scalar
float *Ax = (float *) Ax_input ;
bool *Cx = (bool *) Cx_output ;
const float y = (*((float *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// apply the operator only where the bitmap says A has an entry
if (GBB (Ab, p))
{
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply cij = (x <= aij); the scalar x is
// bound as the first operand.  GB_CAST_OP is consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dettmers_weight_compression.c | // algorithm, implementation is based on "8-Bit Approximations for Parallelism in Deep Learning" by Tim Dettmers
// https://github.com/TimDettmers/clusterNet/blob/master/source/clusterKernels.cu
#include <math.h>
// 126-entry non-uniform quantization table (positive magnitudes, ascending),
// from Tim Dettmers' 8-bit dynamic-tree quantization.  Codes 0..125 index
// this table; code 126 means (near) zero and code 127 means (near) maxval.
const float tbl_floats[126] = {2.750000021e-06,7.249999726e-06,1.875000089e-05,3.624999954e-05,5.874999624e-05,8.624999464e-05,1.437500032e-04,2.312500001e-04,3.187500115e-04,4.062500084e-04,5.187499919e-04,6.562499912e-04,7.937499322e-04,9.312499315e-04,1.218750025e-03,1.656249980e-03,2.093750052e-03,2.531250007e-03,2.968749963e-03,3.406249918e-03,3.843750106e-03,4.281249829e-03,4.843750037e-03,5.531250034e-03,6.218749564e-03,6.906249560e-03,7.593749557e-03,8.281249553e-03,8.968749084e-03,9.656248614e-03,1.109374966e-02,1.328125037e-02,1.546875015e-02,1.765624993e-02,1.984374970e-02,2.203124948e-02,2.421874925e-02,2.640625089e-02,2.859375067e-02,3.078125045e-02,3.296874836e-02,3.515625000e-02,3.734375164e-02,3.953124955e-02,4.171875119e-02,4.390624911e-02,4.671875015e-02,5.015625060e-02,5.359374732e-02,5.703124776e-02,6.046874821e-02,6.390624493e-02,6.734374911e-02,7.078124583e-02,7.421874255e-02,7.765624672e-02,8.109374344e-02,8.453124017e-02,8.796874434e-02,9.140624106e-02,9.484373778e-02,9.828124195e-02,1.054687500e-01,1.164062470e-01,1.273437440e-01,1.382812560e-01,1.492187530e-01,1.601562500e-01,1.710937470e-01,1.820312440e-01,1.929687560e-01,2.039062530e-01,2.148437500e-01,2.257812470e-01,2.367187440e-01,2.476562560e-01,2.585937381e-01,2.695312500e-01,2.804687619e-01,2.914062440e-01,3.023437560e-01,3.132812381e-01,3.242187500e-01,3.351562619e-01,3.460937440e-01,3.570312560e-01,3.679687381e-01,3.789062500e-01,3.898437619e-01,4.007812440e-01,4.117187560e-01,4.226562381e-01,4.335937500e-01,4.445312619e-01,4.585937560e-01,4.757812321e-01,4.929687381e-01,5.101562142e-01,5.273437500e-01,5.445312262e-01,5.617187023e-01,5.789062381e-01,5.960937142e-01,6.132812500e-01,6.304687262e-01,6.476562023e-01,6.648437381e-01,6.820312142e-01,6.992186904e-01,7.164062262e-01,7.335937023e-01,7.507811785e-01,7.679687142e-01,7.851561904e-01,8.023436666e-01,8.195312023e-01,8.367186785e-01,8.539061546e-01,8.710936904e-01,8.882811666e-01,9.054686427e-01,9.226561785e-01,9.398436546e-01,9.570311308e-01,9.742186666e-01,9.914061427e-01};
// normalized magnitudes below thres_low quantize to the zero code (126)
const float thres_low = 1.5e-6F;
// normalized magnitudes above thres_high quantize to the maxval code (127)
const float thres_high = 0.995703F;
// compression_8bit: quantize `size` floats from src into 8-bit codes in dst.
// dst must hold size + 4 bytes: one code per input value, followed by the 4
// raw (native-endian) bytes of the normalization factor maxval.  Each code
// is sign flag (0x80 for negative) + index of the nearest table entry.
void compression_8bit(float* src, unsigned char* dst, int size) {
    // get max (abs) of the input, used as the normalization factor
    float maxval = 1e-20F; //avoid zero division
#pragma omp parallel for reduction(max: maxval)
    for (int i = 0; i < size; i++) {
        float abssrc = fabsf(src[i]);
        if (maxval < abssrc) {
            maxval = abssrc;
        }
    }
#pragma omp parallel for
    for (int i = 0; i < size; i++) {
        float srcval = src[i];
        unsigned char signval = srcval >= 0.0F ? 0 : 128;
        float absnumber = fabsf(srcval) / maxval;
        unsigned char code = 0;
        if (absnumber < thres_low) {
            code = 126; // effectively zero
        } else if (absnumber > thres_high) {
            code = 127; // effectively +/- maxval
        } else {
            // 6-step binary search over the 126-entry table
            int pivot = 63;
            int upper_pivot = 125;
            int lower_pivot = 0;
            for (int j = 32; j > 0; j >>= 1) {
                if (absnumber > tbl_floats[pivot]) {
                    lower_pivot = pivot;
                    pivot += j;
                } else {
                    upper_pivot = pivot;
                    pivot -= j;
                }
            }
            // BUGFIX: when absnumber exceeds every table entry but is still
            // <= thres_high (i.e. in (tbl_floats[125], thres_high]), the
            // search walks pivot to 126, one past the end of tbl_floats;
            // clamp before the nearest-neighbor reads below to avoid an
            // out-of-bounds access (undefined behavior).
            if (pivot > 125) {
                pivot = 125;
            }
            // pick whichever neighboring entry is closest to absnumber
            if (lower_pivot == pivot) {
                if (fabsf(tbl_floats[pivot] - absnumber) < (tbl_floats[upper_pivot] - absnumber)) {
                    code = (unsigned char) pivot;
                } else {
                    code = (unsigned char) upper_pivot;
                }
            } else {
                if ((tbl_floats[pivot] - absnumber) < fabsf(tbl_floats[lower_pivot] - absnumber)) {
                    code = (unsigned char) pivot;
                } else {
                    code = (unsigned char) lower_pivot;
                }
            }
        }
        dst[i] = code + signval;
    }
    // append the scale factor as 4 raw bytes (native endianness; the matching
    // decompression_8bit reads them back the same way)
    unsigned char *maxval_bytes = (unsigned char*)&maxval;
    dst[size+0] = maxval_bytes[0];
    dst[size+1] = maxval_bytes[1];
    dst[size+2] = maxval_bytes[2];
    dst[size+3] = maxval_bytes[3];
}
// decompression_8bit: inverse of compression_8bit.  Rebuilds the per-tensor
// scale factor from the 4 raw bytes appended after the payload, expands the
// full 256-entry code->float lookup, and maps each code in src to dst.
void decompression_8bit(unsigned char* src, float* dst, int size) {
    // recover the scale factor serialized as 4 raw (native-endian) bytes
    float maxval;
    unsigned char* scale_bytes = (unsigned char*)&maxval;
    for (int b = 0; b < 4; b++) {
        scale_bytes[b] = src[size + b];
    }
    // build the lookup: codes 0..125 positive magnitudes, 128..253 negative,
    // 126/254 exact zero, 127/255 = +/- maxval
    float lut[256];
    for (int c = 0; c < 126; c++) {
        float v = tbl_floats[c] * maxval;
        lut[c] = v;
        lut[c + 128] = -v;
    }
    lut[126] = 0.0F;
    lut[126 + 128] = 0.0F;
    lut[127] = maxval;
    lut[127 + 128] = -maxval;
    // map every code to its float value
#pragma omp parallel for
    for (int i = 0; i < size; i++) {
        dst[i] = lut[src[i]];
    }
}
|
GB_binop__bor_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int16)
// C=scalar+B GB (_bind1st__bor_int16)
// C=scalar+B' GB (_bind1st_tran__bor_int16)
// C=A+scalar GB (_bind2nd__bor_int16)
// C=A'+scalar GB (_bind2nd_tran__bor_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij) | (bij)
// operand and result types for the BOR_INT16 operator (all int16_t)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
// (GBX presumably handles iso-valued matrices -- NOTE(review): confirm in GB.h)
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT16 || GxB_NO_BOR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// This kernel is disabled: BOR is not in the supported op set below.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, using cij = aij | bij
GrB_Info GB (_Cdense_ewise3_noaccum__bor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C with BOR
GrB_Info GB (_Cdense_accumB__bor_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (type int16_t) into every entry of the
// dense matrix C, using the BOR operator.
GrB_Info GB (_Cdense_accumb__bor_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// (removed an unreachable duplicate "return (GrB_SUCCESS) ;" that followed
// the block above; the block always returns before reaching it)
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// C = A*D (column scale by a diagonal matrix D): disabled for BOR_INT16,
// so no specialized kernel is generated.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// C = D*B (row scale by a diagonal matrix D): disabled for BOR_INT16,
// so no specialized kernel is generated.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, using cij = aij | bij
GrB_Info GB (_AaddB__bor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult, method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse/hyper, with cij = aij | bij.
GrB_Info GB (_AemultB_08__bor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 02: C<#>=A.*B when A is sparse/hyper and B is
// bitmap/full, with cij = aij | bij.
GrB_Info GB (_AemultB_02__bor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 04: C<M>=A.*B where M is sparse/hyper and both A and B
// are bitmap/full, with cij = aij | bij.
GrB_Info GB (_AemultB_04__bor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap,
// with cij = aij | bij.
GrB_Info GB (_AemultB_bitmap__bor_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = (x | Bx [p]) for every entry present in B, with the
// scalar x bound as the first operand of the BOR operator.
GrB_Info GB (_bind1st__bor_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays, and the bound scalar
int16_t *Bx = (int16_t *) Bx_input ;
int16_t *Cx = (int16_t *) Cx_output ;
const int16_t x = (*((int16_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// apply the operator only where the bitmap says B has an entry
if (GBB (Bb, p))
{
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) | (bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = (Ax [p] | y) for every entry present in A, with the
// scalar y bound as the second operand of the BOR operator.
GrB_Info GB (_bind2nd__bor_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays, and the bound scalar
int16_t *Ax = (int16_t *) Ax_input ;
int16_t *Cx = (int16_t *) Cx_output ;
const int16_t y = (*((int16_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// apply the operator only where the bitmap says A has an entry
if (GBB (Ab, p))
{
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) | (y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply cij = x | aij; the scalar x is
// bound as the first operand.  GB_CAST_OP is consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
GrB_Info GB (_bind1st_tran__bor_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply cij = aij | y; the scalar y is
// bound as the second operand.  GB_CAST_OP is consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
GrB_Info GB (_bind2nd_tran__bor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolutiondepthwise_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 3x3 stride-1 depthwise convolution, pack4 layout (4 channels interleaved
// per pixel), SSE.  One group == one 4-channel slice; the output is assumed
// to already be "valid"-sized relative to the input (outw == w - 2 --
// NOTE(review): confirm against the caller's padding setup).
static void convdw3x3s1_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group 4-lane bias, or zero when no bias is supplied
__m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + g * 4) : _mm_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
// three consecutive input rows forming the 3x3 window
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// load the nine 4-lane kernel taps once per group
__m128 _k00 = _mm_loadu_ps(k0);
__m128 _k01 = _mm_loadu_ps(k0 + 4);
__m128 _k02 = _mm_loadu_ps(k0 + 8);
__m128 _k10 = _mm_loadu_ps(k0 + 12);
__m128 _k11 = _mm_loadu_ps(k0 + 16);
__m128 _k12 = _mm_loadu_ps(k0 + 20);
__m128 _k20 = _mm_loadu_ps(k0 + 24);
__m128 _k21 = _mm_loadu_ps(k0 + 28);
__m128 _k22 = _mm_loadu_ps(k0 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// unrolled: 8 output pixels per iteration; neighboring outputs reuse
// the already-loaded input vectors (stride-1 overlap)
for (; j + 7 < outw; j += 8)
{
__m128 _sum0 = _bias0;
__m128 _r00 = _mm_loadu_ps(r0);
__m128 _r01 = _mm_loadu_ps(r0 + 4);
__m128 _r02 = _mm_loadu_ps(r0 + 8);
__m128 _r10 = _mm_loadu_ps(r1);
__m128 _r11 = _mm_loadu_ps(r1 + 4);
__m128 _r12 = _mm_loadu_ps(r1 + 8);
__m128 _r20 = _mm_loadu_ps(r2);
__m128 _r21 = _mm_loadu_ps(r2 + 4);
__m128 _r22 = _mm_loadu_ps(r2 + 8);
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
__m128 _sum1 = _bias0;
__m128 _r03 = _mm_loadu_ps(r0 + 12);
__m128 _r13 = _mm_loadu_ps(r1 + 12);
__m128 _r23 = _mm_loadu_ps(r2 + 12);
_mm_storeu_ps(outptr0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k22, _r23, _sum1);
__m128 _sum2 = _bias0;
__m128 _r04 = _mm_loadu_ps(r0 + 16);
__m128 _r14 = _mm_loadu_ps(r1 + 16);
__m128 _r24 = _mm_loadu_ps(r2 + 16);
_mm_storeu_ps(outptr0 + 4, _sum1);
_sum2 = _mm_comp_fmadd_ps(_k00, _r02, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k01, _r03, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k02, _r04, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k10, _r12, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k11, _r13, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k12, _r14, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k20, _r22, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k21, _r23, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k22, _r24, _sum2);
__m128 _sum3 = _bias0;
__m128 _r05 = _mm_loadu_ps(r0 + 20);
__m128 _r15 = _mm_loadu_ps(r1 + 20);
__m128 _r25 = _mm_loadu_ps(r2 + 20);
_mm_storeu_ps(outptr0 + 8, _sum2);
_sum3 = _mm_comp_fmadd_ps(_k00, _r03, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k01, _r04, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k02, _r05, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k10, _r13, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k11, _r14, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k12, _r15, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k20, _r23, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k21, _r24, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k22, _r25, _sum3);
__m128 _sum4 = _bias0;
__m128 _r06 = _mm_loadu_ps(r0 + 24);
__m128 _r16 = _mm_loadu_ps(r1 + 24);
__m128 _r26 = _mm_loadu_ps(r2 + 24);
_mm_storeu_ps(outptr0 + 12, _sum3);
_sum4 = _mm_comp_fmadd_ps(_k00, _r04, _sum4);
_sum4 = _mm_comp_fmadd_ps(_k01, _r05, _sum4);
_sum4 = _mm_comp_fmadd_ps(_k02, _r06, _sum4);
_sum4 = _mm_comp_fmadd_ps(_k10, _r14, _sum4);
_sum4 = _mm_comp_fmadd_ps(_k11, _r15, _sum4);
_sum4 = _mm_comp_fmadd_ps(_k12, _r16, _sum4);
_sum4 = _mm_comp_fmadd_ps(_k20, _r24, _sum4);
_sum4 = _mm_comp_fmadd_ps(_k21, _r25, _sum4);
_sum4 = _mm_comp_fmadd_ps(_k22, _r26, _sum4);
__m128 _sum5 = _bias0;
__m128 _r07 = _mm_loadu_ps(r0 + 28);
__m128 _r17 = _mm_loadu_ps(r1 + 28);
__m128 _r27 = _mm_loadu_ps(r2 + 28);
_mm_storeu_ps(outptr0 + 16, _sum4);
_sum5 = _mm_comp_fmadd_ps(_k00, _r05, _sum5);
_sum5 = _mm_comp_fmadd_ps(_k01, _r06, _sum5);
_sum5 = _mm_comp_fmadd_ps(_k02, _r07, _sum5);
_sum5 = _mm_comp_fmadd_ps(_k10, _r15, _sum5);
_sum5 = _mm_comp_fmadd_ps(_k11, _r16, _sum5);
_sum5 = _mm_comp_fmadd_ps(_k12, _r17, _sum5);
_sum5 = _mm_comp_fmadd_ps(_k20, _r25, _sum5);
_sum5 = _mm_comp_fmadd_ps(_k21, _r26, _sum5);
_sum5 = _mm_comp_fmadd_ps(_k22, _r27, _sum5);
__m128 _sum6 = _bias0;
__m128 _r08 = _mm_loadu_ps(r0 + 32);
__m128 _r18 = _mm_loadu_ps(r1 + 32);
__m128 _r28 = _mm_loadu_ps(r2 + 32);
_mm_storeu_ps(outptr0 + 20, _sum5);
_sum6 = _mm_comp_fmadd_ps(_k00, _r06, _sum6);
_sum6 = _mm_comp_fmadd_ps(_k01, _r07, _sum6);
_sum6 = _mm_comp_fmadd_ps(_k02, _r08, _sum6);
_sum6 = _mm_comp_fmadd_ps(_k10, _r16, _sum6);
_sum6 = _mm_comp_fmadd_ps(_k11, _r17, _sum6);
_sum6 = _mm_comp_fmadd_ps(_k12, _r18, _sum6);
_sum6 = _mm_comp_fmadd_ps(_k20, _r26, _sum6);
_sum6 = _mm_comp_fmadd_ps(_k21, _r27, _sum6);
_sum6 = _mm_comp_fmadd_ps(_k22, _r28, _sum6);
__m128 _sum7 = _bias0;
__m128 _r09 = _mm_loadu_ps(r0 + 36);
__m128 _r19 = _mm_loadu_ps(r1 + 36);
__m128 _r29 = _mm_loadu_ps(r2 + 36);
_mm_storeu_ps(outptr0 + 24, _sum6);
_sum7 = _mm_comp_fmadd_ps(_k00, _r07, _sum7);
_sum7 = _mm_comp_fmadd_ps(_k01, _r08, _sum7);
_sum7 = _mm_comp_fmadd_ps(_k02, _r09, _sum7);
_sum7 = _mm_comp_fmadd_ps(_k10, _r17, _sum7);
_sum7 = _mm_comp_fmadd_ps(_k11, _r18, _sum7);
_sum7 = _mm_comp_fmadd_ps(_k12, _r19, _sum7);
_sum7 = _mm_comp_fmadd_ps(_k20, _r27, _sum7);
_sum7 = _mm_comp_fmadd_ps(_k21, _r28, _sum7);
_sum7 = _mm_comp_fmadd_ps(_k22, _r29, _sum7);
_mm_storeu_ps(outptr0 + 28, _sum7);
r0 += 32;
r1 += 32;
r2 += 32;
outptr0 += 32;
}
// tail: 4 output pixels per iteration
for (; j + 3 < outw; j += 4)
{
__m128 _sum0 = _bias0;
__m128 _r00 = _mm_loadu_ps(r0);
__m128 _r01 = _mm_loadu_ps(r0 + 4);
__m128 _r02 = _mm_loadu_ps(r0 + 8);
__m128 _r10 = _mm_loadu_ps(r1);
__m128 _r11 = _mm_loadu_ps(r1 + 4);
__m128 _r12 = _mm_loadu_ps(r1 + 8);
__m128 _r20 = _mm_loadu_ps(r2);
__m128 _r21 = _mm_loadu_ps(r2 + 4);
__m128 _r22 = _mm_loadu_ps(r2 + 8);
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
__m128 _sum1 = _bias0;
__m128 _r03 = _mm_loadu_ps(r0 + 12);
__m128 _r13 = _mm_loadu_ps(r1 + 12);
__m128 _r23 = _mm_loadu_ps(r2 + 12);
_mm_storeu_ps(outptr0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k22, _r23, _sum1);
__m128 _sum2 = _bias0;
__m128 _r04 = _mm_loadu_ps(r0 + 16);
__m128 _r14 = _mm_loadu_ps(r1 + 16);
__m128 _r24 = _mm_loadu_ps(r2 + 16);
_mm_storeu_ps(outptr0 + 4, _sum1);
_sum2 = _mm_comp_fmadd_ps(_k00, _r02, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k01, _r03, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k02, _r04, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k10, _r12, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k11, _r13, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k12, _r14, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k20, _r22, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k21, _r23, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k22, _r24, _sum2);
__m128 _sum3 = _bias0;
__m128 _r05 = _mm_loadu_ps(r0 + 20);
__m128 _r15 = _mm_loadu_ps(r1 + 20);
__m128 _r25 = _mm_loadu_ps(r2 + 20);
_mm_storeu_ps(outptr0 + 8, _sum2);
_sum3 = _mm_comp_fmadd_ps(_k00, _r03, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k01, _r04, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k02, _r05, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k10, _r13, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k11, _r14, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k12, _r15, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k20, _r23, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k21, _r24, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k22, _r25, _sum3);
_mm_storeu_ps(outptr0 + 12, _sum3);
r0 += 16;
r1 += 16;
r2 += 16;
outptr0 += 16;
}
// tail: 2 output pixels per iteration
for (; j + 1 < outw; j += 2)
{
__m128 _sum0 = _bias0;
__m128 _r00 = _mm_loadu_ps(r0);
__m128 _r01 = _mm_loadu_ps(r0 + 4);
__m128 _r02 = _mm_loadu_ps(r0 + 8);
__m128 _r10 = _mm_loadu_ps(r1);
__m128 _r11 = _mm_loadu_ps(r1 + 4);
__m128 _r12 = _mm_loadu_ps(r1 + 8);
__m128 _r20 = _mm_loadu_ps(r2);
__m128 _r21 = _mm_loadu_ps(r2 + 4);
__m128 _r22 = _mm_loadu_ps(r2 + 8);
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
__m128 _sum1 = _bias0;
__m128 _r03 = _mm_loadu_ps(r0 + 12);
__m128 _r13 = _mm_loadu_ps(r1 + 12);
__m128 _r23 = _mm_loadu_ps(r2 + 12);
_mm_storeu_ps(outptr0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k22, _r23, _sum1);
_mm_storeu_ps(outptr0 + 4, _sum1);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 8;
}
// tail: one output pixel at a time
for (; j < outw; j++)
{
__m128 _sum0 = _bias0;
__m128 _r00 = _mm_loadu_ps(r0);
__m128 _r01 = _mm_loadu_ps(r0 + 4);
__m128 _r02 = _mm_loadu_ps(r0 + 8);
__m128 _r10 = _mm_loadu_ps(r1);
__m128 _r11 = _mm_loadu_ps(r1 + 4);
__m128 _r12 = _mm_loadu_ps(r1 + 8);
__m128 _r20 = _mm_loadu_ps(r2);
__m128 _r21 = _mm_loadu_ps(r2 + 4);
__m128 _r22 = _mm_loadu_ps(r2 + 8);
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
_mm_storeu_ps(outptr0, _sum0);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 4;
}
// advance past the 2-pixel kernel apron to reach the next input row
// (relies on outw == w - 2 -- NOTE(review): confirm)
r0 += 2 * 4;
r1 += 2 * 4;
r2 += 2 * 4;
}
}
}
// Depthwise 3x3 convolution with stride 2 on pack4 data (each logical element
// is a pack of 4 floats, one per interleaved channel).  SSE implementation:
// every __m128 below holds one 4-channel pack.
//
// bottom_blob  input blob, pack4 layout; channel g is the input for group g
// top_blob     output blob (pre-allocated), pack4 layout
// kernel       per-group kernel taps: 9 packs (3x3) of 4 floats per group
// _bias        optional per-group bias, 4 floats per group; may be empty
// opt          options; only opt.num_threads is used here
//
// NOTE(review): no padding or activation is applied in this kernel; it appears
// the caller is expected to have padded the input and to apply any activation
// afterwards -- confirm against the calling convolution layer.
static void convdw3x3s2_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
// After one output row, 2*outw input packs of each row have been consumed;
// skip the remainder of the row plus one full row (vertical stride 2).
// Units are floats, hence the * 4 (4 floats per pack).
const int tailstep = (w - 2 * outw + w) * 4;
const float* bias = _bias;
// Each group (channel) is computed independently, so parallelize over groups.
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// Per-group bias pack (4 floats), or zero when no bias was supplied.
__m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + g * 4) : _mm_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
// Three consecutive input rows feed one output row.
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// The nine 3x3 kernel taps, one 4-wide pack each (_kRC = row R, col C).
__m128 _k00 = _mm_loadu_ps(k0);
__m128 _k01 = _mm_loadu_ps(k0 + 4);
__m128 _k02 = _mm_loadu_ps(k0 + 8);
__m128 _k10 = _mm_loadu_ps(k0 + 12);
__m128 _k11 = _mm_loadu_ps(k0 + 16);
__m128 _k12 = _mm_loadu_ps(k0 + 20);
__m128 _k20 = _mm_loadu_ps(k0 + 24);
__m128 _k21 = _mm_loadu_ps(k0 + 28);
__m128 _k22 = _mm_loadu_ps(k0 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// Main loop: 4 output packs per iteration.  With stride 2, output n of
// this iteration reads input packs starting at column offset 2*n, so
// adjacent sums share every other input pack (_sum1 starts at _r02, etc).
for (; j + 3 < outw; j += 4)
{
__m128 _sum0 = _bias0;
__m128 _r00 = _mm_loadu_ps(r0);
__m128 _r01 = _mm_loadu_ps(r0 + 4);
__m128 _r02 = _mm_loadu_ps(r0 + 8);
__m128 _r10 = _mm_loadu_ps(r1);
__m128 _r11 = _mm_loadu_ps(r1 + 4);
__m128 _r12 = _mm_loadu_ps(r1 + 8);
__m128 _r20 = _mm_loadu_ps(r2);
__m128 _r21 = _mm_loadu_ps(r2 + 4);
__m128 _r22 = _mm_loadu_ps(r2 + 8);
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
__m128 _sum1 = _bias0;
// Issue the loads for the next output before storing the previous one;
// the store/load interleaving below follows that same pattern throughout.
__m128 _r03 = _mm_loadu_ps(r0 + 12);
__m128 _r13 = _mm_loadu_ps(r1 + 12);
__m128 _r23 = _mm_loadu_ps(r2 + 12);
__m128 _r04 = _mm_loadu_ps(r0 + 16);
__m128 _r14 = _mm_loadu_ps(r1 + 16);
__m128 _r24 = _mm_loadu_ps(r2 + 16);
_mm_storeu_ps(outptr0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_k00, _r02, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k01, _r03, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k02, _r04, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k10, _r12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k11, _r13, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k12, _r14, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k20, _r22, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k21, _r23, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k22, _r24, _sum1);
__m128 _sum2 = _bias0;
__m128 _r05 = _mm_loadu_ps(r0 + 20);
__m128 _r15 = _mm_loadu_ps(r1 + 20);
__m128 _r25 = _mm_loadu_ps(r2 + 20);
__m128 _r06 = _mm_loadu_ps(r0 + 24);
__m128 _r16 = _mm_loadu_ps(r1 + 24);
__m128 _r26 = _mm_loadu_ps(r2 + 24);
_mm_storeu_ps(outptr0 + 4, _sum1);
_sum2 = _mm_comp_fmadd_ps(_k00, _r04, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k01, _r05, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k02, _r06, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k10, _r14, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k11, _r15, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k12, _r16, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k20, _r24, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k21, _r25, _sum2);
_sum2 = _mm_comp_fmadd_ps(_k22, _r26, _sum2);
__m128 _sum3 = _bias0;
__m128 _r07 = _mm_loadu_ps(r0 + 28);
__m128 _r17 = _mm_loadu_ps(r1 + 28);
__m128 _r27 = _mm_loadu_ps(r2 + 28);
__m128 _r08 = _mm_loadu_ps(r0 + 32);
__m128 _r18 = _mm_loadu_ps(r1 + 32);
__m128 _r28 = _mm_loadu_ps(r2 + 32);
_mm_storeu_ps(outptr0 + 8, _sum2);
_sum3 = _mm_comp_fmadd_ps(_k00, _r06, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k01, _r07, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k02, _r08, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k10, _r16, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k11, _r17, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k12, _r18, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k20, _r26, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k21, _r27, _sum3);
_sum3 = _mm_comp_fmadd_ps(_k22, _r28, _sum3);
_mm_storeu_ps(outptr0 + 12, _sum3);
r0 += 2 * 16; // 4 outputs x stride 2 = 8 input packs consumed (32 floats)
r1 += 2 * 16;
r2 += 2 * 16;
outptr0 += 16;
}
// Tail: 2 output packs per iteration.
for (; j + 1 < outw; j += 2)
{
__m128 _sum0 = _bias0;
__m128 _r00 = _mm_loadu_ps(r0);
__m128 _r01 = _mm_loadu_ps(r0 + 4);
__m128 _r02 = _mm_loadu_ps(r0 + 8);
__m128 _r10 = _mm_loadu_ps(r1);
__m128 _r11 = _mm_loadu_ps(r1 + 4);
__m128 _r12 = _mm_loadu_ps(r1 + 8);
__m128 _r20 = _mm_loadu_ps(r2);
__m128 _r21 = _mm_loadu_ps(r2 + 4);
__m128 _r22 = _mm_loadu_ps(r2 + 8);
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
__m128 _sum1 = _bias0;
__m128 _r03 = _mm_loadu_ps(r0 + 12);
__m128 _r13 = _mm_loadu_ps(r1 + 12);
__m128 _r23 = _mm_loadu_ps(r2 + 12);
__m128 _r04 = _mm_loadu_ps(r0 + 16);
__m128 _r14 = _mm_loadu_ps(r1 + 16);
__m128 _r24 = _mm_loadu_ps(r2 + 16);
_mm_storeu_ps(outptr0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_k00, _r02, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k01, _r03, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k02, _r04, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k10, _r12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k11, _r13, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k12, _r14, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k20, _r22, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k21, _r23, _sum1);
_sum1 = _mm_comp_fmadd_ps(_k22, _r24, _sum1);
_mm_storeu_ps(outptr0 + 4, _sum1);
r0 += 2 * 8; // 2 outputs x stride 2 = 4 input packs consumed
r1 += 2 * 8;
r2 += 2 * 8;
outptr0 += 8;
}
// Tail: one output pack at a time.
for (; j < outw; j++)
{
__m128 _sum0 = _bias0;
__m128 _r00 = _mm_loadu_ps(r0);
__m128 _r01 = _mm_loadu_ps(r0 + 4);
__m128 _r02 = _mm_loadu_ps(r0 + 8);
__m128 _r10 = _mm_loadu_ps(r1);
__m128 _r11 = _mm_loadu_ps(r1 + 4);
__m128 _r12 = _mm_loadu_ps(r1 + 8);
__m128 _r20 = _mm_loadu_ps(r2);
__m128 _r21 = _mm_loadu_ps(r2 + 4);
__m128 _r22 = _mm_loadu_ps(r2 + 8);
_sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);
_mm_storeu_ps(outptr0, _sum0);
r0 += 2 * 4; // 1 output x stride 2 = 2 input packs consumed
r1 += 2 * 4;
r2 += 2 * 4;
outptr0 += 4;
}
// Advance all three row pointers down by two input rows (vertical stride 2).
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
GB_AxB_saxpy3.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3: compute C=A*B, C<M>=A*B, or C<!M>=A*B in parallel
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// GB_AxB_saxpy3 computes C=A*B, C<M>=A*B, or C<!M>=A*B in parallel. If the
// mask matrix M has too many entries compared to the work to compute A*B, then
// it is not applied. Instead, M is ignored and C=A*B is computed. The mask
// is applied later, in GB_mxm.
// For simplicity, this discussion and all comments in this code assume that
// all matrices are in CSC format, but the algorithm is CSR/CSC agnostic.
// The matrix B is split into two kinds of tasks: coarse and fine. A coarse
// task computes C(:,j1:j2) = A*B(:,j1:j2), for a unique set of vectors j1:j2.
// Those vectors are not shared with any other tasks. A fine task works with a
// team of other fine tasks to compute C(:,j) for a single vector j. Each fine
// task computes A*B(k1:k2,j) for a unique range k1:k2, and sums its results
// into C(:,j) via atomic operations.
// Each coarse or fine task uses either Gustavson's method [1] or the Hash
// method [2]. There are 4 kinds of tasks:
// fine Gustavson task
// fine hash task
// coarse Gustavson task
// coarse hash task
// Each of the 4 kinds of tasks is then subdivided into 3 variants, for C=A*B,
// C<M>=A*B, and C<!M>=A*B, giving a total of 12 different types of tasks.
// Fine tasks are used when there would otherwise be too much work for a single
// task to compute the single vector C(:,j). Fine tasks share all of their
// workspace with the team of fine tasks computing C(:,j). Coarse tasks are
// preferred since they require less synchronization, but fine tasks allow for
// better parallelization when B has only a few vectors. If B consists of a
// single vector (for GrB_mxv if A is in CSC format and not transposed, or
// for GrB_vxm if A is in CSR format and not transposed), then the only way to
// get parallelism is via fine tasks. If a single thread is used for this
// case, a single-vector coarse task is used.
// To select between the Hash method or Gustavson's method for each task, the
// hash table size is first found. The hash table size for a hash task depends
// on the maximum flop count for any vector in that task (which is just one
// vector for the fine tasks). It is set to twice the smallest power of 2 that
// is greater than the flop count to compute that vector (plus the # of entries
// in M(:,j) for tasks that compute C<M>=A*B or C<!M>=A*B). This size ensures
// the results will fit in the hash table, and with hopefully only a modest
// number of collisions. If the hash table size exceeds a threshold (currently
// m/16 if C is m-by-n), then Gustavson's method is used instead, and the hash
// table size is set to m, to serve as the gather/scatter workspace for
// Gustavson's method.
// The workspace allocated depends on the type of task. Let s be the hash
// table size for the task, and C is m-by-n (assuming all matrices are CSC; if
// CSR, then m is replaced with n).
//
// fine Gustavson task (shared): uint8_t Hf [m] ; ctype Hx [m] ;
// fine hash task (shared): int64_t Hf [s] ; ctype Hx [s] ;
// coarse Gustavson task: int64_t Hf [m] ; ctype Hx [m] ;
// coarse hash task: int64_t Hf [s] ; ctype Hx [s] ;
// int64_t Hi [s] ;
//
// Note that the Hi array is needed only for the coarse hash task. Additional
// workspace is allocated to construct the list of tasks, but this is freed
// before C is constructed.
// References:
// [1] Fred G. Gustavson. 1978. Two Fast Algorithms for Sparse Matrices:
// Multiplication and Permuted Transposition. ACM Trans. Math. Softw. 4, 3
// (Sept. 1978), 250–269. DOI:https://doi.org/10.1145/355791.355796
// [2] Yusuke Nagasaka, Satoshi Matsuoka, Ariful Azad, and Aydın Buluç. 2018.
// High-Performance Sparse Matrix-Matrix Products on Intel KNL and Multicore
// Architectures. In Proc. 47th Intl. Conf. on Parallel Processing (ICPP '18).
// Association for Computing Machinery, New York, NY, USA, Article 34, 1–10.
// DOI:https://doi.org/10.1145/3229710.3229720
//------------------------------------------------------------------------------
#include "GB_mxm.h"
#include "GB_AxB_saxpy3.h"
#ifndef GBCOMPACT
#include "GB_AxB__include.h"
#endif
//------------------------------------------------------------------------------
// control parameters for generating parallel tasks
//------------------------------------------------------------------------------
#define GB_NTASKS_PER_THREAD 2
#define GB_COSTLY 1.2
#define GB_FINE_WORK 2
#define GB_MWORK_ALPHA 0.01
//------------------------------------------------------------------------------
// free workspace
//------------------------------------------------------------------------------
// This workspace is not needed in the GB_Asaxpy3B* worker functions.
#define GB_FREE_INITIAL_WORK \
{ \
GB_FREE_MEMORY (Bflops2, max_bjnz+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Coarse_initial, ntasks_initial+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Fine_slice, ntasks+1, sizeof (int64_t)) ; \
}
#define GB_FREE_WORK \
{ \
GB_FREE_INITIAL_WORK ; \
GB_FREE_MEMORY (TaskList, ntasks, sizeof (GB_saxpy3task_struct)) ; \
GB_FREE_MEMORY (Hi_all, Hi_size_total, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Hf_all, Hf_size_total, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Hx_all, Hx_size_total, 1) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_MATRIX_FREE (Chandle) ; \
}
//------------------------------------------------------------------------------
// GB_hash_table_size
//------------------------------------------------------------------------------
// flmax is the max flop count for computing A*B(:,j), for any vector j that
// this task computes. If the mask M is present, flmax also includes the
// number of entries in M(:,j). GB_hash_table_size determines the hash table
// size for this task, which is twice the smallest power of 2 larger than
// flmax. If flmax is large enough, the hash_size is returned as cvlen, so
// that Gustavson's method will be used instead of the Hash method.
// By default, Gustavson vs Hash is selected automatically. AxB_method can be
// selected via the descriptor or a global setting, as the non-default
// GxB_AxB_GUSTAVSON or GxB_AxB_HASH settings, to enforce the selection of
// either of those methods. However, if Hash is selected but the hash table
// size exceeds cvlen, then Gustavson's method is used instead.
// GB_hash_table_size: determine the hash table size for one task.
//
// flmax      max flop count for any vector computed by this task (plus the
//            entries of M(:,j) when a mask is present)
// cvlen      vector length of C
// AxB_method Default, Gustavson, or Hash, from the descriptor
//
// Returns hash_size = 2 * (smallest power of 2 >= flmax) when the Hash method
// is chosen, or cvlen when Gustavson's method is chosen (in which case the
// "hash table" serves as the gather/scatter workspace).
static inline int64_t GB_hash_table_size
(
    int64_t flmax,              // max flop count for any vector in this task
    int64_t cvlen,              // vector length of C
    const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash
)
{
    // hash_size = 2 * (smallest power of 2 >= flmax)
    int64_t hash_size =
        ((int64_t) 2) << ((int64_t) floor (log2 ((double) flmax)) + 1) ;

    // decide whether this task uses Gustavson's method instead
    bool gustavson ;
    switch (AxB_method)
    {
        case GxB_AxB_GUSTAVSON :
            // always use Gustavson's method
            gustavson = true ;
            break ;
        case GxB_AxB_HASH :
            // use the Hash method, unless the hash table reaches cvlen
            gustavson = (hash_size >= cvlen) ;
            break ;
        default :
            // auto selection: use Gustavson if the hash table is too big
            gustavson = (hash_size >= cvlen/16) ;
            break ;
    }

    // Gustavson's method uses a size-cvlen gather/scatter workspace
    return (gustavson ? cvlen : hash_size) ;
}
//------------------------------------------------------------------------------
// GB_create_coarse_task: create a single coarse task
//------------------------------------------------------------------------------
// Compute the max flop count for any vector in a coarse task, determine the
// hash table size, and construct the coarse task.
// GB_create_coarse_task: construct a single coarse task.
//
// Computes the max flop count over the vectors kfirst:klast assigned to this
// coarse task, determines its hash table size, and fills in its entry in
// TaskList.  The hash workspace pointers are assigned later.
static inline void GB_create_coarse_task
(
    int64_t kfirst,     // coarse task consists of vectors kfirst:klast
    int64_t klast,
    GB_saxpy3task_struct *TaskList,
    int taskid,         // taskid for this coarse task
    int64_t *Bflops,    // size bnvec; cum sum of flop counts for vectors of B
    int64_t cvlen,      // vector length of B and C
    double chunk,
    int nthreads_max,
    const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash
)
{
    // max flop count for any single vector in this task (at least 1)
    int64_t flmax = 1 ;
    int nth = GB_nthreads (klast-kfirst+1, chunk, nthreads_max) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nth) schedule(static) \
        reduction(max:flmax)
    for (k = kfirst ; k <= klast ; k++)
    {
        int64_t fl = Bflops [k+1] - Bflops [k] ;
        flmax = GB_IMAX (flmax, fl) ;
    }

    // fill in the descriptor for this coarse task
    GB_saxpy3task_struct *t = &(TaskList [taskid]) ;
    t->start     = kfirst ;
    t->end       = klast ;
    t->vector    = -1 ;         // coarse tasks span vectors, not a single one
    t->hsize     = GB_hash_table_size (flmax, cvlen, AxB_method) ;
    t->Hi        = NULL ;       // assigned later
    t->Hf        = NULL ;       // assigned later
    t->Hx        = NULL ;       // assigned later
    t->my_cjnz   = 0 ;          // unused for coarse tasks
    t->flops     = Bflops [klast+1] - Bflops [kfirst] ;
    t->master    = taskid ;
    t->team_size = 1 ;          // coarse tasks work alone
}
//------------------------------------------------------------------------------
// GB_AxB_saxpy3: compute C=A*B, C<M>=A*B, or C<!M>=A*B in parallel
//------------------------------------------------------------------------------
GrB_Info GB_AxB_saxpy3 // C = A*B using Gustavson+Hash
(
GrB_Matrix *Chandle, // output matrix
const GrB_Matrix M_input, // optional mask matrix
const bool Mask_comp_input, // if true, use !M
const bool Mask_struct, // if true, use the only structure of M
const GrB_Matrix A, // input matrix A
const GrB_Matrix B, // input matrix B
const GrB_Semiring semiring, // semiring that defines C=A*B
const bool flipxy, // if true, do z=fmult(b,a) vs fmult(a,b)
bool *mask_applied, // if true, then mask was applied
const GrB_Desc_Value AxB_method, // Default, Gustavson, or Hash
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Matrix M = M_input ; // use the mask M, until deciding otherwise
bool Mask_comp = Mask_comp_input ;
(*mask_applied) = false ;
ASSERT (Chandle != NULL) ;
ASSERT (*Chandle == NULL) ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for saxpy3 A*B", GB0) ;
ASSERT_MATRIX_OK (A, "A for saxpy3 A*B", GB0) ;
ASSERT_MATRIX_OK (B, "B for saxpy3 A*B", GB0) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
ASSERT_SEMIRING_OK (semiring, "semiring for saxpy3 A*B", GB0) ;
ASSERT (A->vdim == B->vlen) ;
int64_t *GB_RESTRICT Hi_all = NULL ;
int64_t *GB_RESTRICT Hf_all = NULL ;
GB_void *GB_RESTRICT Hx_all = NULL ;
int64_t *GB_RESTRICT Coarse_initial = NULL ; // initial coarse tasks
GB_saxpy3task_struct *GB_RESTRICT TaskList = NULL ;
int64_t *GB_RESTRICT Fine_slice = NULL ;
int64_t *GB_RESTRICT Bflops2 = NULL ;
int ntasks = 0 ;
int ntasks_initial = 0 ;
size_t Hi_size_total = 0 ;
size_t Hf_size_total = 0 ;
size_t Hx_size_total = 0 ;
int64_t max_bjnz = 0 ;
//--------------------------------------------------------------------------
// get the semiring operators
//--------------------------------------------------------------------------
GrB_BinaryOp mult = semiring->multiply ;
GrB_Monoid add = semiring->add ;
ASSERT (mult->ztype == add->op->ztype) ;
bool op_is_first = mult->opcode == GB_FIRST_opcode ;
bool op_is_second = mult->opcode == GB_SECOND_opcode ;
bool op_is_pair = mult->opcode == GB_PAIR_opcode ;
bool A_is_pattern = false ;
bool B_is_pattern = false ;
if (flipxy)
{
// z = fmult (b,a) will be computed
A_is_pattern = op_is_first || op_is_pair ;
B_is_pattern = op_is_second || op_is_pair ;
ASSERT (GB_IMPLIES (!A_is_pattern,
GB_Type_compatible (A->type, mult->ytype))) ;
ASSERT (GB_IMPLIES (!B_is_pattern,
GB_Type_compatible (B->type, mult->xtype))) ;
}
else
{
// z = fmult (a,b) will be computed
A_is_pattern = op_is_second || op_is_pair ;
B_is_pattern = op_is_first || op_is_pair ;
ASSERT (GB_IMPLIES (!A_is_pattern,
GB_Type_compatible (A->type, mult->xtype))) ;
ASSERT (GB_IMPLIES (!B_is_pattern,
GB_Type_compatible (B->type, mult->ytype))) ;
}
#ifdef GBCOMPACT
bool is_any_pair_semiring = false ;
#else
GB_Opcode mult_opcode, add_opcode ;
GB_Type_code xycode, zcode ;
bool builtin_semiring = GB_AxB_semiring_builtin (A, A_is_pattern, B,
B_is_pattern, semiring, flipxy, &mult_opcode, &add_opcode, &xycode,
&zcode) ;
bool is_any_pair_semiring = builtin_semiring
&& (add_opcode == GB_ANY_opcode)
&& (mult_opcode == GB_PAIR_opcode) ;
#endif
(*Chandle) = NULL ;
//--------------------------------------------------------------------------
// get A, and B
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ai = A->i ;
const int64_t avlen = A->vlen ;
const int64_t anvec = A->nvec ;
const bool A_is_hyper = A->is_hyper ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bh = B->h ;
const int64_t *GB_RESTRICT Bi = B->i ;
const int64_t bvdim = B->vdim ;
const int64_t bnz = GB_NNZ (B) ;
const int64_t bnvec = B->nvec ;
const bool B_is_hyper = B->is_hyper ;
//--------------------------------------------------------------------------
// determine the # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// allocate C (just C->p and C->h, but not C->i or C->x)
//--------------------------------------------------------------------------
GrB_Type ctype = add->op->ztype ;
size_t csize = ctype->size ;
int64_t cvlen = avlen ;
int64_t cvdim = bvdim ;
int64_t cnvec = bnvec ;
// calloc Cp so it can be used as the Bflops workspace
GB_NEW (Chandle, ctype, cvlen, cvdim, GB_Ap_calloc, true,
GB_SAME_HYPER_AS (B_is_hyper), B->hyper_ratio, cnvec, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_ALL ;
return (info) ;
}
GrB_Matrix C = (*Chandle) ;
int64_t *GB_RESTRICT Cp = C->p ;
int64_t *GB_RESTRICT Ch = C->h ;
if (B_is_hyper)
{
// C has the same set of vectors as B
int nth = GB_nthreads (cnvec, chunk, nthreads_max) ;
GB_memcpy (Ch, Bh, cnvec * sizeof (int64_t), nth) ;
C->nvec = bnvec ;
}
//==========================================================================
// phase0: create parallel tasks
//==========================================================================
//--------------------------------------------------------------------------
// compute flop counts for each vector of B and C
//--------------------------------------------------------------------------
int64_t Mwork = 0 ;
int64_t *GB_RESTRICT Bflops = Cp ; // Cp is used as workspace for Bflops
GB_OK (GB_AxB_flopcount (&Mwork, Bflops, M, Mask_comp, A, B, Context)) ;
int64_t total_flops = Bflops [bnvec] ;
//--------------------------------------------------------------------------
// determine if the mask M should be applied, or done later
//--------------------------------------------------------------------------
// If M is very large as compared to A*B, then it is too costly to apply
// during the computation of A*B. In this case, compute C=A*B, ignoring
// the mask. Tell the caller that the mask was not applied, so that it
// will be applied later in GB_mxm.
double axbflops = total_flops - Mwork ;
GBBURBLE ("axbflops %g Mwork %g ", axbflops, (double) Mwork) ;
if ((M != NULL) && (axbflops < ((double) Mwork * GB_MWORK_ALPHA)))
{
// M is present but costly to use. Do not use it during the
// computation of A*B. Instead, compute C=A*B and then apply the mask
// later.
M = NULL ;
Mask_comp = false ;
int nth = GB_nthreads (bnvec, chunk, nthreads_max) ;
int64_t kk ;
// GB_AxB_flopcount requires Bflops be set to zero here
#pragma omp parallel for num_threads(nth) schedule(static)
for (kk = 0 ; kk <= bnvec ; kk++)
{
Bflops [kk] = 0 ;
}
// redo the flop count analysis, without the mask
GB_OK (GB_AxB_flopcount (&Mwork, Bflops, NULL, false, A, B, Context)) ;
total_flops = Bflops [bnvec] ;
GBBURBLE ("(discard mask) ") ;
}
else if (M != NULL)
{
GBBURBLE ("(use mask) ") ;
}
//--------------------------------------------------------------------------
// get M
//--------------------------------------------------------------------------
bool mask_is_M = (M != NULL && !Mask_comp) ;
const int64_t *GB_RESTRICT Mp = NULL ;
const int64_t *GB_RESTRICT Mh = NULL ;
const int64_t *GB_RESTRICT Mi = NULL ;
// const GB_void *GB_RESTRICT Mx = NULL ;
// size_t msize = 0 ;
int64_t mnvec = 0 ;
bool M_is_hyper = false ;
if (M != NULL)
{
Mp = M->p ;
Mh = M->h ;
Mi = M->i ;
// Mx = M->x ;
// msize = M->type->size ;
mnvec = M->nvec ;
M_is_hyper = M->is_hyper ;
}
//--------------------------------------------------------------------------
// determine # of threads and # of initial coarse tasks
//--------------------------------------------------------------------------
int nthreads = GB_nthreads ((double) total_flops, chunk, nthreads_max) ;
ntasks_initial = (nthreads == 1) ? 1 : (GB_NTASKS_PER_THREAD * nthreads) ;
double target_task_size = ((double) total_flops) / ntasks_initial ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
double target_fine_size = target_task_size / GB_FINE_WORK ;
target_fine_size = GB_IMAX (target_fine_size, chunk) ;
//--------------------------------------------------------------------------
// determine # of parallel tasks
//--------------------------------------------------------------------------
int nfine = 0 ; // # of fine tasks
int ncoarse = 0 ; // # of coarse tasks
max_bjnz = 0 ; // max (nnz (B (:,j))) of fine tasks
// FUTURE: also use ultra-fine tasks that compute A(i1:i2,k)*B(k,j)
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// construct initial coarse tasks
//----------------------------------------------------------------------
if (!GB_pslice (&Coarse_initial, Bflops, bnvec, ntasks_initial))
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// split the work into coarse and fine tasks
//----------------------------------------------------------------------
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
// where j == (Bh == NULL) ? kk : Bh [kk].
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = Bp [kk+1] - Bp [kk] ;
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// vectors kcoarse_start to kk-1 form a single
// coarse task
ncoarse++ ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// vectors kk will be split into multiple fine tasks
max_bjnz = GB_IMAX (max_bjnz, bjnz) ;
int team_size = ceil (jflops / target_fine_size) ;
nfine += team_size ;
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// vectors kcoarse_start to klast-1 form a single
// coarse task
ncoarse++ ;
}
}
else
{
// This coarse task is OK as-is.
ncoarse++ ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
if (bnvec == 1)
{
// If B is a single vector, and is computed by a single thread,
// then a single fine task is used.
nfine = 1 ;
ncoarse = 0 ;
}
else
{
// One thread uses a single coarse task if B is not a vector.
nfine = 0 ;
ncoarse = 1 ;
}
}
ntasks = ncoarse + nfine ;
//--------------------------------------------------------------------------
// allocate the tasks, and workspace to construct fine tasks
//--------------------------------------------------------------------------
GB_CALLOC_MEMORY (TaskList, ntasks, sizeof (GB_saxpy3task_struct)) ;
if (max_bjnz > 0)
{
// also allocate workspace to construct fine tasks
GB_MALLOC_MEMORY (Fine_slice, ntasks+1, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (Bflops2, max_bjnz+1, sizeof (int64_t)) ;
}
if (TaskList == NULL ||
(max_bjnz > 0 && (Fine_slice == NULL || Bflops2 == NULL)))
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create the tasks
//--------------------------------------------------------------------------
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// create the coarse and fine tasks
//----------------------------------------------------------------------
int nf = 0 ; // fine tasks have task id 0:nfine-1
int nc = nfine ; // coarse task ids are nfine:ntasks-1
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = Bp [kk+1] - Bp [kk] ;
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// kcoarse_start:kk-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, kk-1,
TaskList, nc++, Bflops, cvlen,
chunk, nthreads_max, AxB_method) ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// get the mask M(:,j), for C<M>=A*B
int64_t im_first = -1, im_last = -1 ;
if (mask_is_M)
{
int64_t j = (Bh == NULL) ? kk : Bh [kk] ;
int64_t mpleft = 0 ;
int64_t mpright = mnvec-1 ;
int64_t pM, pM_end ;
GB_lookup (M_is_hyper, Mh, Mp, &mpleft, mpright, j,
&pM, &pM_end) ;
int64_t mjnz = pM_end - pM ; // nnz (M (:,j))
// For C<M>=A*B, if M(:,j) is empty, then there
// would be no flops to compute C(:,j), and thus
// no fine tasks constructed for C(:,j).
// Thus mjnz > 0 must hold.
ASSERT (mjnz > 0) ;
if (mjnz > 0) // but check anyway, just to be safe
{
im_first = Mi [pM] ;
im_last = Mi [pM_end-1] ;
}
}
// count the work for each entry B(k,j). Do not
// include the work to scan M(:,j), since that will
// be evenly divided between all tasks in this team.
// Do check if M(:,j) and A(:,k) are disjoint, for
// C<M>=A*B, when accounting for the flops for B(k,j).
int64_t pB_start = Bp [kk] ;
int nth = GB_nthreads (bjnz, chunk, nthreads_max) ;
int64_t s ;
#pragma omp parallel for num_threads(nth) \
schedule(static)
for (s = 0 ; s < bjnz ; s++)
{
// get B(k,j)
int64_t k = Bi [pB_start + s] ;
// fl = flop count for just A(:,k)*B(k,j)
int64_t pA, pA_end ;
int64_t pleft = 0 ;
GB_lookup (A_is_hyper, Ah, Ap, &pleft, anvec-1, k,
&pA, &pA_end) ;
int64_t fl = pA_end - pA ;
if (mask_is_M && fl > 0)
{
// no work if A(:,k) and M(:,j) disjoint
int64_t alo = Ai [pA] ; // get first A(:,k)
int64_t ahi = Ai [pA_end-1] ;// get last A(:,k)
if (ahi < im_first || alo > im_last) fl = 0 ;
}
Bflops2 [s] = fl ;
ASSERT (fl >= 0) ;
}
// cumulative sum of flops to compute A*B(:,j)
GB_cumsum (Bflops2, bjnz, NULL, nth) ;
// slice B(:,j) into fine tasks
int team_size = ceil (jflops / target_fine_size) ;
ASSERT (Fine_slice != NULL) ;
GB_pslice (&Fine_slice, Bflops2, bjnz, team_size) ;
// shared hash table for all fine tasks for A*B(:,j)
int64_t hsize =
GB_hash_table_size (jflops, cvlen, AxB_method) ;
// construct the fine tasks for C(:,j)=A*B(:,j)
int master = nf ;
for (int fid = 0 ; fid < team_size ; fid++)
{
int64_t pstart = Fine_slice [fid] ;
int64_t pend = Fine_slice [fid+1] ;
int64_t fl = Bflops2 [pend] - Bflops2 [pstart] ;
TaskList [nf].start = pB_start + pstart ;
TaskList [nf].end = pB_start + pend - 1 ;
TaskList [nf].vector = kk ;
TaskList [nf].hsize = hsize ;
TaskList [nf].Hi = NULL ; // assigned later
TaskList [nf].Hf = NULL ; // assigned later
TaskList [nf].Hx = NULL ; // assigned later
TaskList [nf].my_cjnz = 0 ;
TaskList [nf].flops = fl ;
TaskList [nf].master = master ;
TaskList [nf].team_size = team_size ;
nf++ ;
}
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// kcoarse_start:klast-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, klast-1, TaskList,
nc++, Bflops, cvlen, chunk, nthreads_max, AxB_method) ;
}
}
else
{
// This coarse task is OK as-is.
GB_create_coarse_task (kfirst, klast-1, TaskList, nc++, Bflops,
cvlen, chunk, nthreads_max, AxB_method) ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
// create a single coarse task
GB_create_coarse_task (0, bnvec-1, TaskList, 0, Bflops, cvlen, 1, 1,
AxB_method) ;
if (bnvec == 1)
{
// convert the single coarse task into a single fine task
TaskList [0].start = 0 ; // first entry in B(:,0)
TaskList [0].end = bnz - 1 ; // last entry in B(:,0)
TaskList [0].vector = 0 ;
}
}
//--------------------------------------------------------------------------
// free workspace used to create the tasks
//--------------------------------------------------------------------------
// Frees Bflops2, Coarse_initial, and Fine_slice. These do not need to
// be freed in the GB_Asaxpy3B worker below.
GB_FREE_INITIAL_WORK ;
//--------------------------------------------------------------------------
#if GB_BURBLE
int nfine_hash = 0 ;
int nfine_gus = 0 ;
int ncoarse_hash = 0 ;
int ncoarse_1hash = 0 ;
int ncoarse_gus = 0 ;
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
int64_t hash_size = TaskList [taskid].hsize ;
bool is_fine = (taskid < nfine) ;
bool use_Gustavson = (hash_size == cvlen) ;
if (is_fine)
{
// fine task
if (use_Gustavson)
{
// fine Gustavson task
nfine_gus++ ;
}
else
{
// fine hash task
nfine_hash++ ;
}
}
else
{
// coarse task
int64_t kfirst = TaskList [taskid].start ;
int64_t klast = TaskList [taskid].end ;
if (use_Gustavson)
{
// coarse Gustavson task
ncoarse_gus++ ;
}
else
{
// hash task
ncoarse_hash++ ;
}
}
}
GBBURBLE ("nthreads %d ntasks %d coarse: (gus: %d hash: %d)"
" fine: (gus: %d hash: %d) ", nthreads, ntasks,
ncoarse_gus, ncoarse_hash, nfine_gus, nfine_hash) ;
#endif
// Bflops is no longer needed as an alias for Cp
Bflops = NULL ;
//--------------------------------------------------------------------------
// allocate the hash tables
//--------------------------------------------------------------------------
// If Gustavson's method is used (coarse tasks):
//
// hash_size is cvlen.
// Hi is not allocated.
// Hf and Hx are both of size hash_size.
//
// (Hf [i] == mark) is true if i is in the hash table.
// Hx [i] is the value of C(i,j) during the numeric phase.
//
// Gustavson's method is used if the hash_size for the Hash method
// is a significant fraction of cvlen.
//
// If the Hash method is used (coarse tasks):
//
// hash_size is 2 times the smallest power of 2 that is larger than
// the # of flops required for any column C(:,j) being computed. This
// ensures that all entries have space in the hash table, and that the
// hash occupancy will never be more than 50%. It is always smaller
// than cvlen (otherwise, Gustavson's method is used).
//
// A hash function is used for the ith entry:
// hash = (i * GB_HASH_FACTOR) & (hash_size-1)
// If a collision occurs, linear probing is used:
// hash = (hash + 1) & (hashsize-1)
//
// (Hf [hash] == mark) is true if the position is occupied.
// i = Hi [hash] gives the row index i that occupies that position.
// Hx [hash] is the value of C(i,j) during the numeric phase.
//
// For both coarse methods:
//
// Hf starts out all zero (via calloc), and mark starts out as 1. To
// clear all of Hf, mark is incremented, so that all entries in Hf are
// not equal to mark.
// add some padding to the end of each hash table, to avoid false
// sharing of cache lines between the hash tables.
size_t hx_pad = 64 ;
size_t hi_pad = 64 / sizeof (int64_t) ;
Hi_size_total = 0 ;
Hf_size_total = 0 ;
Hx_size_total = 0 ;
// determine the total size of all hash tables
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
if (taskid != TaskList [taskid].master)
{
// allocate a single shared hash table for all fine
// tasks that compute a single C(:,j)
continue ;
}
int64_t hash_size = TaskList [taskid].hsize ;
int64_t k = TaskList [taskid].vector ;
bool is_fine = (k >= 0) ;
bool use_Gustavson = (hash_size == cvlen) ;
// int64_t kfirst = TaskList [taskid].start ;
// int64_t klast = TaskList [taskid].end ;
if (is_fine && use_Gustavson)
{
// Hf is uint8_t for the fine Gustavson tasks, but round up
// to the nearest number of int64_t values.
Hf_size_total += GB_CEIL ((hash_size + hi_pad), sizeof (int64_t)) ;
}
else
{
// all other methods use Hf as int64_t
Hf_size_total += (hash_size + hi_pad) ;
}
if (!is_fine && !use_Gustavson)
{
// only coarse hash tasks need Hi
Hi_size_total += (hash_size + hi_pad) ;
}
// all tasks use an Hx array of size hash_size
if (!is_any_pair_semiring)
{
// except that the ANY_PAIR semiring does not use Hx
Hx_size_total += (hash_size * csize + hx_pad) ;
}
}
// allocate space for all hash tables
if (Hi_size_total > 0)
{
GB_MALLOC_MEMORY (Hi_all, Hi_size_total, sizeof (int64_t)) ;
}
if (Hf_size_total > 0)
{
GB_CALLOC_MEMORY (Hf_all, Hf_size_total, sizeof (int64_t)) ;
}
if (Hx_size_total > 0)
{
GB_MALLOC_MEMORY (Hx_all, Hx_size_total, 1) ;
}
if ((Hi_size_total > 0 && Hi_all == NULL) ||
(Hf_size_total > 0 && Hf_all == NULL) ||
(Hx_size_total > 0 && Hx_all == NULL))
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
// split the space into separate hash tables
int64_t *GB_RESTRICT Hi_split = Hi_all ;
int64_t *GB_RESTRICT Hf_split = Hf_all ;
GB_void *GB_RESTRICT Hx_split = Hx_all ;
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
if (taskid != TaskList [taskid].master)
{
// allocate a single hash table for all fine
// tasks that compute a single C(:,j)
continue ;
}
TaskList [taskid].Hi = Hi_split ;
TaskList [taskid].Hf = (void *) Hf_split ;
TaskList [taskid].Hx = Hx_split ;
int64_t hash_size = TaskList [taskid].hsize ;
int64_t k = TaskList [taskid].vector ;
bool is_fine = (k >= 0) ;
bool use_Gustavson = (hash_size == cvlen) ;
// int64_t kfirst = TaskList [taskid].start ;
// int64_t klast = TaskList [taskid].end ;
if (is_fine && use_Gustavson)
{
// Hf is uint8_t for the fine Gustavson method
Hf_split += GB_CEIL ((hash_size + hi_pad), sizeof (int64_t)) ;
}
else
{
// Hf is int64_t for all other methods
Hf_split += (hash_size + hi_pad) ;
}
if (!is_fine && !use_Gustavson)
{
// only coarse hash tasks need Hi
Hi_split += (hash_size + hi_pad) ;
}
// all tasks use an Hx array of size hash_size
if (!is_any_pair_semiring)
{
Hx_split += (hash_size * csize + hx_pad) ;
}
}
// assign shared hash tables to fine task teams
for (int taskid = 0 ; taskid < nfine ; taskid++)
{
int master = TaskList [taskid].master ;
ASSERT (TaskList [master].vector >= 0) ;
if (taskid != master)
{
// this fine task (Gustavson or hash) shares its hash table
// with all other tasks in its team, for a single vector C(:,j).
ASSERT (TaskList [taskid].vector == TaskList [master].vector) ;
TaskList [taskid].Hf = TaskList [master].Hf ;
TaskList [taskid].Hx = TaskList [master].Hx ;
}
}
//==========================================================================
// phase1: symbolic analysis
//==========================================================================
GB_AxB_saxpy3_symbolic (C, M, Mask_comp, Mask_struct, A, B, TaskList,
ntasks, nfine, nthreads) ;
//==========================================================================
// C = A*B, via saxpy3 method and built-in semiring
//==========================================================================
bool done = false ;
#ifndef GBCOMPACT
//--------------------------------------------------------------------------
// define the worker for the switch factory
//--------------------------------------------------------------------------
#define GB_Asaxpy3B(add,mult,xyname) GB_Asaxpy3B_ ## add ## mult ## xyname
#define GB_AxB_WORKER(add,mult,xyname) \
{ \
info = GB_Asaxpy3B (add,mult,xyname) (C, M, Mask_comp, \
Mask_struct, A, A_is_pattern, B, B_is_pattern, \
TaskList, ntasks, nfine, nthreads, Context) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//--------------------------------------------------------------------------
// launch the switch factory
//--------------------------------------------------------------------------
if (builtin_semiring)
{
#include "GB_AxB_factory.c"
}
#endif
//==========================================================================
// C = A*B, via the generic saxpy3 method, with typecasting
//==========================================================================
if (!done)
{
GB_BURBLE_MATRIX (C, "generic ") ;
info = GB_AxB_saxpy3_generic (C, M, Mask_comp, Mask_struct,
A, A_is_pattern, B, B_is_pattern, semiring, flipxy,
TaskList, ntasks, nfine, nthreads, Context) ;
}
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//==========================================================================
// prune empty vectors, free workspace, and return result
//==========================================================================
GB_FREE_WORK ;
info = GB_hypermatrix_prune (C, Context) ;
if (info == GrB_SUCCESS) { ASSERT_MATRIX_OK (C, "saxpy3: output", GB0) ; }
ASSERT (*Chandle == C) ;
ASSERT (!GB_ZOMBIES (C)) ;
ASSERT (!GB_PENDING (C)) ;
(*mask_applied) = (M != NULL) ;
return (info) ;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Y is normalized in place so the microsecond fields can be subtracted
 * directly (callers must not rely on *y afterwards).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into Y's microsecond field when X has fewer usecs. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry excess microseconds of the difference into Y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization, tv_usec of the difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, test;

  /* Grid sizes include an 8-cell halo (4 on each side) for the order-4
   * stencil.  Defaults keep the sizes defined when command-line arguments
   * are missing (the original read them uninitialized in that case). */
  int Nx = 16 + 8, Ny = 16 + 8, Nz = 16 + 8, Nt = 4;

  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A holds the two time planes of the ping-pong buffer; roc2 is the
   * per-cell coefficient field, indexed [z][y][x].  (The original leaked
   * a one-element malloc by allocating roc2 twice.) */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);

  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // Initialize the whole grid (including index 0, reached by the -4 halo
  // offsets) and BOTH time planes: the stencil reads A[(t+1)%2] at t==0,
  // which the original left uninitialized.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  // 25-point stencil coefficients: center plus 4 rings along each axis.
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (the original leaked A itself and tile_size).
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);

  return 0;
}
|
master_taskloop_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// Checks that -Wuninitialized fires for a use inside a 'master taskloop' body.
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp master taskloop
for (int i = 0; i < 10; ++i)
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop'}}
#pragma omp master taskloop
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop'}}
#pragma omp master taskloop foo
// The directive needs no clauses, but must be followed by a for loop.
void test_no_clause(void) {
int i;
#pragma omp master taskloop
for (i = 0; i < 16; ++i)
;

// expected-error@+2 {{statement after '#pragma omp master taskloop' must be a for loop}}
#pragma omp master taskloop
++i;
}
// Jumps into or out of the OpenMP region, and returns from it, are diagnosed;
// branches that stay inside the region (L2) are allowed.
void test_branch_protected_scope(void) {
int i = 0;
L1:
++i;
int x[24];

#pragma omp parallel
#pragma omp master taskloop
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}

if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// Unknown tokens after the directive are ignored with a warning; a repeated
// 'nogroup' clause is a hard error.
void test_invalid_clause(void) {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop foo bar
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{directive '#pragma omp master taskloop' cannot contain more than one 'nogroup' clause}}
#pragma omp master taskloop nogroup nogroup
for (i = 0; i < 16; ++i)
;
}
// Stray punctuation and clauses not valid on 'master taskloop' (e.g. linear)
// produce "extra tokens" warnings or unexpected-clause errors.
void test_non_identifiers(void) {
int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop;
for (i = 0; i < 16; ++i)
;
// expected-warning@+3 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp master taskloop'}}
#pragma omp parallel
#pragma omp master taskloop linear(x);
for (i = 0; i < 16; ++i)
;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop private(x);
for (i = 0; i < 16; ++i)
;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo(void);
// Exercises the 'collapse' clause parser and semantics: malformed argument
// lists, non-constant/non-positive expressions, and the requirement that
// collapse(4) be followed by four perfectly nested for loops.
void test_collapse(void) {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
#pragma omp master taskloop collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp master taskloop collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp master taskloop collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// Exercises the 'private' clause parser: malformed lists, non-variable
// arguments, then well-formed one-, two-, and three-variable lists.
void test_private(void) {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop private(0)
for (i = 0; i < 16; ++i)
;

int x, y, z;
#pragma omp parallel
#pragma omp master taskloop private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// Exercises the 'lastprivate' clause parser: same malformed-list cases as
// 'private', followed by valid variable lists.
void test_lastprivate(void) {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate(
for (i = 0; i < 16; ++i)
;

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop lastprivate(0)
for (i = 0; i < 16; ++i)
;

int x, y, z;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Exercises the 'firstprivate' clause parser, plus the valid combination of
// 'lastprivate' and 'firstprivate' on the same variables.
void test_firstprivate(void) {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate(
for (i = 0; i < 16; ++i)
;

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop firstprivate(0)
for (i = 0; i < 16; ++i)
;

int x, y, z;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Loop-iteration-variable diagnostics: floating-point induction variables are
// rejected; __int128 is accepted but warned about (narrowed to 64 bits).
void test_loop_messages(void) {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}

// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp master taskloop
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
|
conv_kernel_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: bhu@openailab.com
*/
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h> /* memcpy, used by interleave() */

#include "../conv_ref_kernel.h"
/* Size in bytes of the scratch buffer that holds the filter weights.
 * UINT8 filters are simulated in fp32 (see interleave_uint8), so each
 * element needs 4x its stored size; all other types are copied as-is. */
static int get_private_mem_size(struct ir_tensor* filter)
{
    int raw_bytes = filter->elem_num * filter->elem_size;
    return (filter->data_type == TENGINE_DT_UINT8) ? raw_bytes * 4 : raw_bytes;
}
/* Pack the filter weights into the per-instance interleave buffer.  The
 * reference kernel uses no blocked layout, so this is a flat byte copy. */
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* simply copy the data */
    memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}
/* Dequantize uint8 filter weights to fp32 in the interleave buffer.  The
 * reference (simulator) path computes uint8 convolutions in fp32, so the
 * stored (value - zero_point) * scale is materialized once up front. */
static void interleave_uint8(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* dequant uint8 weight to fp32 for simulator */
    float* weight_fp32 = ( float* )priv_info->interleave_buffer;
    uint8_t* weight_uint8 = ( uint8_t* )filter->data;
    float scale = filter->scale;
    int zero_point = filter->zero_point;
    for (int i = 0; i < filter->elem_num; i++)
    {
        weight_fp32[i] = (( float )weight_uint8[i] - ( float )zero_point) * scale;
    }
}
/* Copy element src[src_off] to dst[dst_off], interpreting both buffers
 * according to elem_type.  INT8 subtracts the zero point in-place; UINT8 is
 * dequantized to fp32 (the simulator computes uint8 convolutions in fp32).
 * 32-bit and 16-bit types are bit-copied through same-width integer types.
 * Unknown types are silently ignored, matching the original behavior. */
static inline void copy_one_element(void* src, void* dst, int src_off, int dst_off, int elem_type, int zero_point,
                                    float scale)
{
    switch (elem_type)
    {
        case TENGINE_DT_FP32:
        case TENGINE_DT_INT32:
            ((int32_t*)dst)[dst_off] = ((int32_t*)src)[src_off];
            break;
        case TENGINE_DT_FP16:
        case TENGINE_DT_INT16:
            ((int16_t*)dst)[dst_off] = ((int16_t*)src)[src_off];
            break;
        case TENGINE_DT_INT8:
            ((int8_t*)dst)[dst_off] = (int8_t)(((int8_t*)src)[src_off] - zero_point);
            break;
        case TENGINE_DT_UINT8:
            /* simulator uint8 inference with fp32 */
            ((float*)dst)[dst_off] = (( float )((uint8_t*)src)[src_off] - ( float )zero_point) * scale;
            break;
    }
}
/* Write a zero of the width implied by elem_type at dst[dst_off].  INT8 and
 * UINT8 data are stored as fp32 in the simulator buffers, so they share the
 * float branch.  Unknown types are ignored, matching the original. */
static inline void zero_one_element(void* dst, int dst_off, int elem_type)
{
    switch (elem_type)
    {
        case TENGINE_DT_FP32:
        case TENGINE_DT_INT32:
            ((int32_t*)dst)[dst_off] = 0;
            break;
        case TENGINE_DT_FP16:
        case TENGINE_DT_INT16:
            ((int16_t*)dst)[dst_off] = 0;
            break;
        case TENGINE_DT_INT8:
        case TENGINE_DT_UINT8:
            /* simulator stores int8/uint8 intermediates as fp32 */
            ((float*)dst)[dst_off] = 0.0f;
            break;
    }
}
/* Expand one (batch n, group) slice of the NCHW input into the im2col
 * buffer: row r = output pixel (l,m), column i = (channel, kh, kw) of the
 * kernel window.  Padding positions are written as zeros.  Despite the
 * name, the element type is dispatched at runtime via copy_one_element. */
static void im2col_fp32(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info,
                        struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];
    /* base of this (batch, group) slice, in bytes via elem_size */
    void* input_base = input->data + (n * image_size + group * group_size) * input->elem_size;
    void* im2col_buf = priv_info->im2col_buffer;
    float scale = input->scale;
    int zero_point = input->zero_point;
    int k_h = param->kernel_h;
    int k_w = param->kernel_w;
    int in_c = input_chan;
    int in_h = input->dims[2];
    int in_w = input->dims[3];
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int s_h = param->stride_h;
    int s_w = param->stride_w;
    int p_h0 = param->pad_h0;
    int p_w0 = param->pad_w0;
    int d_h = param->dilation_h;
    int d_w = param->dilation_w;
    int data_type = input->data_type;
    int kernel_size = k_h * k_w * in_c;
    /* i enumerates the kernel window: channel-major, then kh, then kw */
    for (int i = 0; i < kernel_size; i++)
    {
        int c_off = i / (k_h * k_w);
        int c_left = i % (k_h * k_w);
        int kh_off = c_left / k_w;
        int kw_off = c_left % k_w;
        for (int l = 0; l < out_h; l++)
        {
            for (int m = 0; m < out_w; m++)
            {
                /* destination: row-major (output pixel, kernel column) */
                int out_off = (l * out_w + m) * kernel_size + i;
                /* source coordinates after stride/pad/dilation */
                int img_h = l * s_h - p_h0 + kh_off * d_h;
                int img_w = m * s_w - p_w0 + kw_off * d_w;
                if (img_h >= 0 && img_w >= 0 && img_h < in_h && img_w < in_w)
                {
                    int in_off = c_off * in_h * in_w + img_h * in_w + img_w;
                    copy_one_element(input_base, im2col_buf, in_off, out_off, data_type, zero_point, scale);
                }
                else
                    /* out-of-bounds (padding) position */
                    zero_one_element(im2col_buf, out_off, data_type);
            }
        }
    }
}
/* uint8 variant of the im2col expansion.  NOTE(review): the body is
 * line-for-line identical to im2col_fp32 above — the type distinction is
 * handled inside copy_one_element/zero_one_element via input->data_type —
 * so the two functions are candidates for merging. */
static void im2col_uint8(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info,
                         struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];
    /* base of this (batch, group) slice, in bytes via elem_size */
    void* input_base = input->data + (n * image_size + group * group_size) * input->elem_size;
    void* im2col_buf = priv_info->im2col_buffer;
    float scale = input->scale;
    int zero_point = input->zero_point;
    int k_h = param->kernel_h;
    int k_w = param->kernel_w;
    int in_c = input_chan;
    int in_h = input->dims[2];
    int in_w = input->dims[3];
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int s_h = param->stride_h;
    int s_w = param->stride_w;
    int p_h0 = param->pad_h0;
    int p_w0 = param->pad_w0;
    int d_h = param->dilation_h;
    int d_w = param->dilation_w;
    int data_type = input->data_type;
    int kernel_size = k_h * k_w * in_c;
    /* i enumerates the kernel window: channel-major, then kh, then kw */
    for (int i = 0; i < kernel_size; i++)
    {
        int c_off = i / (k_h * k_w);
        int c_left = i % (k_h * k_w);
        int kh_off = c_left / k_w;
        int kw_off = c_left % k_w;
        for (int l = 0; l < out_h; l++)
        {
            for (int m = 0; m < out_w; m++)
            {
                int out_off = (l * out_w + m) * kernel_size + i;
                int img_h = l * s_h - p_h0 + kh_off * d_h;
                int img_w = m * s_w - p_w0 + kw_off * d_w;
                if (img_h >= 0 && img_w >= 0 && img_h < in_h && img_w < in_w)
                {
                    int in_off = c_off * in_h * in_w + img_h * in_w + img_w;
                    copy_one_element(input_base, im2col_buf, in_off, out_off, data_type, zero_point, scale);
                }
                else
                    zero_one_element(im2col_buf, out_off, data_type);
            }
        }
    }
}
/* Reference fp32 GEMM for one (batch n, group): multiplies the interleaved
 * filter block [outchan_g x kernel_size] by the im2col matrix
 * [kernel_size x out_h*out_w], then applies bias and activation in place.
 * Activation encoding as used here: param->activation == 0 clamps negatives
 * (ReLU); param->activation > 0 clamps to [0, 6] (ReLU6). */
static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                       struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];
    /* offset into the weight / output / bias blocks owned by this group */
    float* interleave_fp32 = ( float* )priv_info->interleave_buffer + outchan_g * group * kernel_size;
    float* im2col_fp32 = priv_info->im2col_buffer;
    float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    float* bias_fp32 = NULL;
    if (bias)
        bias_fp32 = ( float* )bias->data + outchan_g * group;
    /* one output channel per iteration; channels are independent */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        float* kernel = interleave_fp32 + i * kernel_size;
        float* input = im2col_fp32;
        float* output = output_fp32 + i * (out_h * out_w);
        for (int j = 0; j < out_h * out_w; j++)
        {
            /* dot product of kernel row i with im2col row j */
            int im2col_off = j * kernel_size;
            float sum = 0.f;
            for (int k = 0; k < kernel_size; k++)
            {
                sum += kernel[k] * input[im2col_off + k];
            }
            output[0] = sum;
            output++;
        }
    }
    // process bias
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                output_fp32[output_off] += bias_fp32[i];
            }
        }
    }
    // process activation relu
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
            }
        }
    }
    // process activation relu6
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
                if (output_fp32[output_off] > 6)
                    output_fp32[output_off] = 6;
            }
        }
    }
}
static void sgemm_uint8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                        struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                        int group, int num_thread)
{
    /* uint8 convolution GEMM, simulated in fp32 and re-quantized at the end.
     * The im2col/interleave steps already dequantized input and weights into
     * fp32 buffers; the fp32 result is quantized back to uint8 with
     * output->scale / output->zero_point, saturating to [0, 255].
     * Activation convention matches sgemm_fp32: 0 -> ReLU, >0 -> ReLU6. */
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_c = output->dims[1];
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = out_c * out_h * out_w;
    int out_group_size = outchan_g * out_h * out_w;
    float* interleave_fp32 = ( float* )priv_info->interleave_buffer + outchan_g * group * kernel_size;
    float* im2col_fp32 = priv_info->im2col_buffer;
    uint8_t* output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    int32_t* bias_int32 = NULL;
    float bias_scale = 0.f;

    /* fp32 scratch buffer holding this group's output before quantization */
    float* output_fp32 = ( float* )sys_malloc(out_group_size * sizeof(float));
    if (output_fp32 == NULL)
        return; /* allocation failure: bail out instead of dereferencing NULL */

    if (bias)
    {
        bias_int32 = ( int32_t* )bias->data + outchan_g * group;
        /* int32 bias is quantized with scale = input_scale * weight_scale */
        bias_scale = input->scale * filter->scale;
    }

    /* GEMM: one output channel per parallel iteration */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        float* kernel = interleave_fp32 + i * kernel_size;
        float* col = im2col_fp32;
        float* out = output_fp32 + i * (out_h * out_w);
        for (int j = 0; j < out_h * out_w; j++)
        {
            int im2col_off = j * kernel_size;
            float sum = 0.f;
            for (int k = 0; k < kernel_size; k++)
                sum += kernel[k] * col[im2col_off + k];
            out[j] = sum;
        }
    }

    /* add dequantized bias */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
                output_fp32[i * (out_h * out_w) + j] += ( float )bias_int32[i] * bias_scale;
        }
    }

    /* activation == 0: ReLU (buffer is contiguous, so iterate it flat) */
    if (param->activation == 0)
    {
        for (int i = 0; i < out_group_size; i++)
        {
            if (output_fp32[i] < 0)
                output_fp32[i] = 0;
        }
    }

    /* activation > 0: ReLU6 */
    if (param->activation > 0)
    {
        for (int i = 0; i < out_group_size; i++)
        {
            if (output_fp32[i] < 0)
                output_fp32[i] = 0;
            if (output_fp32[i] > 6)
                output_fp32[i] = 6;
        }
    }

    /* re-quantize fp32 -> uint8 with saturation */
    for (int i = 0; i < out_group_size; i++)
    {
        int udata = ( int )(round(output_fp32[i] / output->scale) + output->zero_point);
        if (udata > 255)
            udata = 255;
        else if (udata < 0)
            udata = 0;
        output_uint8[i] = udata;
    }
    sys_free(output_fp32);
}
int conv_kernel_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
    /* Bytes required by the im2col scratch buffer for one group:
     * (output spatial size) x (per-group kernel size) x (element size). */
    int channels_per_group = param->input_channel / param->group;
    int kernel_size = channels_per_group * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    /* uint8 inference is simulated with fp32, so reserve 4 bytes per element */
    int elem_size = (input->data_type == TENGINE_DT_UINT8) ? 4 : input->elem_size;
    return elem_size * output_xy * kernel_size;
}
int conv_kernel_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
struct conv_priv_info* priv_info, struct conv_param* param)
{
if (!priv_info->external_im2col_mem)
{
int mem_size = conv_kernel_get_shared_mem_size(input_tensor, output_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->im2col_buffer = mem;
priv_info->im2col_buffer_size = mem_size;
}
if (!priv_info->external_interleave_mem)
{
int mem_size = get_private_mem_size(filter_tensor);
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
if (input_tensor->data_type == TENGINE_DT_UINT8)
interleave_uint8(filter_tensor, priv_info);
else
interleave(filter_tensor, priv_info);
return 0;
}
int conv_kernel_postrun(struct conv_priv_info* priv_info)
{
    /* Release internally-allocated scratch buffers; externally-provided
     * buffers belong to the caller and are left untouched. Always returns 0. */
    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL; /* guard against double free */
    }
    if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }
    return 0;
}
int conv_kernel_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                    struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                    int num_thread, int cpu_affinity)
{
    /* Run im2col + GEMM for every (batch, group) pair. Data types other than
     * fp32/uint8 are skipped without error. Always returns 0. */
    int batch = input_tensor->dims[0];
    int data_type = input_tensor->data_type;
    for (int b = 0; b < batch; b++)
    {
        for (int g = 0; g < param->group; g++)
        {
            switch (data_type)
            {
                case TENGINE_DT_FP32:
                    im2col_fp32(input_tensor, output_tensor, priv_info, param, b, g);
                    sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, b, g,
                               num_thread);
                    break;
                case TENGINE_DT_UINT8:
                    im2col_uint8(input_tensor, output_tensor, priv_info, param, b, g);
                    sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, b, g,
                                num_thread);
                    break;
                default:
                    /* unsupported data type: nothing to do for this pair */
                    break;
            }
        }
    }
    return 0;
}
int conv_kernel_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    /* Adopt a caller-owned im2col buffer. Marking it external means
     * prerun will not allocate one and postrun will not free it. */
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
|
csr_matvec.c | /*BHEADER**********************************************************************
* Copyright (c) 2006 The Regents of the University of California.
* Produced at the Lawrence Livermore National Laboratory.
* Written by the HYPRE team. UCRL-CODE-222953.
* All rights reserved.
*
* This file is part of HYPRE (see http://www.llnl.gov/CASC/hypre/).
* Please see the COPYRIGHT_and_LICENSE file for the copyright notice,
* disclaimer, contact information and the GNU Lesser General Public License.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License (as published by the Free Software
* Foundation) version 2.1 dated February 1999.
*
* HYPRE is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Revision: 2.10 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "headers.h"
#include <assert.h>
#include "omp.h"
//#include "causalProfiler.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec( double alpha,
                       hypre_CSRMatrix *A,
                       hypre_Vector   *x,
                       double          beta,
                       hypre_Vector   *y )
{
   /* Computes y <- alpha*A*x + beta*y for a CSR matrix A.
    * Returns an informational code: 1 if length(x) != num_cols,
    * 2 if length(y) != num_rows, 3 if both. Processing continues anyway,
    * since callers frequently pass temporary vectors of "wrong" size. */
   double     *A_data   = hypre_CSRMatrixData(A);
   int        *A_i      = hypre_CSRMatrixI(A);
   int        *A_j      = hypre_CSRMatrixJ(A);
   int         num_rows = hypre_CSRMatrixNumRows(A);
   int         num_cols = hypre_CSRMatrixNumCols(A);
   int        *A_rownnz = hypre_CSRMatrixRownnz(A);
   int         num_rownnz = hypre_CSRMatrixNumRownnz(A);
   double     *x_data = hypre_VectorData(x);
   double     *y_data = hypre_VectorData(y);
   int         x_size = hypre_VectorSize(x);
   int         y_size = hypre_VectorSize(y);
   int         num_vectors = hypre_VectorNumVectors(x);
   int         idxstride_y = hypre_VectorIndexStride(y);
   int         vecstride_y = hypre_VectorVectorStride(y);
   int         idxstride_x = hypre_VectorIndexStride(x);
   int         vecstride_x = hypre_VectorVectorStride(x);
   double      temp, tempx;
   int         i, j, jj;
   int         m;
   double      xpar = 0.7;  /* threshold for choosing the rownnz-based path */
   int         chunk;
   int         ierr = 0;

   //__WHATIF__BEGIN__
   hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
   if (num_cols != x_size)
      ierr = 1;
   if (num_rows != y_size)
      ierr = 2;
   if (num_cols != x_size && num_rows != y_size)
      ierr = 3;

   /*-----------------------------------------------------------------------
    * alpha == 0: y <- beta*y, nothing else to do - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
      for (i = 0; i < num_rows*num_vectors; i++)
         y_data[i] *= beta;
      return ierr;
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y, so the final scaling by alpha yields beta*y overall
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] = 0.0;
      }
      else
      {
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] *= temp;
      }
   }
   //__WHATIF__END__

   /*-----------------------------------------------------------------
    * y += A*x
    *-----------------------------------------------------------------*/
   /* use the rownnz list to skip empty rows when it is short enough */
   if (num_rownnz < xpar*(num_rows))
   {
      for (i = 0; i < num_rownnz; i++)
      {
         m = A_rownnz[i];
         if ( num_vectors == 1 )
         {
            tempx = y_data[m];
            for (jj = A_i[m]; jj < A_i[m+1]; jj++)
               tempx += A_data[jj] * x_data[A_j[jj]];
            y_data[m] = tempx;
         }
         else
            for ( j = 0; j < num_vectors; ++j )
            {
               tempx = y_data[ j*vecstride_y + m*idxstride_y ];
               for (jj = A_i[m]; jj < A_i[m+1]; jj++)
                  tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
               y_data[ j*vecstride_y + m*idxstride_y ] = tempx;
            }
      }
   }
   else
   {
      /* OpenMP requires a strictly positive chunk size; num_rows/16 can be 0 */
      chunk = num_rows/16;
      if (chunk < 1) chunk = 1;
      /* j must be private: it is the inner loop index of the multivector
       * branch; leaving it shared would race between threads */
#pragma omp parallel for private(i,j,jj,temp) schedule(dynamic, chunk)
      for (i = 0; i < num_rows; i++)
      {
         if ( num_vectors == 1 )
         {
            temp = y_data[i];
            for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               temp += A_data[jj] * x_data[A_j[jj]];
            y_data[i] = temp;
         }
         else
            for ( j = 0; j < num_vectors; ++j )
            {
               temp = y_data[ j*vecstride_y + i*idxstride_y ];
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  temp += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
               }
               y_data[ j*vecstride_y + i*idxstride_y ] = temp;
            }
      }
   }

   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
      for (i = 0; i < num_rows*num_vectors; i++)
         y_data[i] *= alpha;
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvecT( double alpha,
                        hypre_CSRMatrix *A,
                        hypre_Vector   *x,
                        double          beta,
                        hypre_Vector   *y )
{
   /* Performs y <- alpha * A^T * x + beta * y.
    * x must have length num_rows(A), y length num_cols(A). */
   double     *A_data = hypre_CSRMatrixData(A);
   int        *A_i = hypre_CSRMatrixI(A);
   int        *A_j = hypre_CSRMatrixJ(A);
   int         num_rows = hypre_CSRMatrixNumRows(A);
   int         num_cols = hypre_CSRMatrixNumCols(A);
   double     *x_data = hypre_VectorData(x);
   double     *y_data = hypre_VectorData(y);
   int         x_size = hypre_VectorSize(x);
   int         y_size = hypre_VectorSize(y);
   int         num_vectors = hypre_VectorNumVectors(x);
   int         idxstride_y = hypre_VectorIndexStride(y);
   int         vecstride_y = hypre_VectorVectorStride(y);
   int         idxstride_x = hypre_VectorIndexStride(x);
   int         vecstride_x = hypre_VectorVectorStride(x);
   double      temp;
   int         i, i1, j, jv, jj, ns, ne, size, rest;
   int         num_threads;
   int         ierr = 0;
   /*---------------------------------------------------------------------
    * Check for size compatibility.  MatvecT returns ierr = 1 if
    * length of X doesn't equal the number of rows of A,
    * ierr = 2 if the length of Y doesn't equal the number of
    * columns of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in MatvecT, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
   if (num_rows != x_size)
      ierr = 1;
   if (num_cols != y_size)
      ierr = 2;
   if (num_rows != x_size && num_cols != y_size)
      ierr = 3;
   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation: y <- beta*y only - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
      for (i = 0; i < num_cols*num_vectors; i++)
         y_data[i] *= beta;
      return ierr;
   }
   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y, so the final scaling by alpha gives beta*y overall
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
         for (i = 0; i < num_cols*num_vectors; i++)
            y_data[i] = 0.0;
      }
      else
      {
         for (i = 0; i < num_cols*num_vectors; i++)
            y_data[i] *= temp;
      }
   }
   /*-----------------------------------------------------------------
    * y += A^T*x
    *-----------------------------------------------------------------*/
   num_threads = hypre_NumThreads();
   if (num_threads > 1)
   {
      /* Partition the columns of A (= rows of A^T) into num_threads
       * contiguous, non-overlapping ranges; pass i1 owns exactly the columns
       * j with ns < j < ne (both bounds EXCLUSIVE, hence the -1 offsets).
       * The first `rest` ranges own size+1 columns, the others own size,
       * so the union covers [0, num_cols) exactly.
       * NOTE(review): despite the name, this loop over i1 runs serially --
       * there is no omp pragma here. Each pass rescans all rows and filters
       * by column range, which looks designed for per-thread column
       * ownership; confirm whether a parallel pragma was intended. */
      for (i1 = 0; i1 < num_threads; i1++)
      {
         size = num_cols/num_threads;
         rest = num_cols - size*num_threads;
         if (i1 < rest)
         {
            /* range [(size+1)*i1, (size+1)*(i1+1)) via exclusive bounds */
            ns = i1*size+i1-1;
            ne = (i1+1)*size+i1+1;
         }
         else
         {
            /* range [size*i1+rest, size*(i1+1)+rest) via exclusive bounds */
            ns = i1*size+rest-1;
            ne = (i1+1)*size+rest;
         }
         if ( num_vectors==1 )
         {
            /* scatter each row's entries into the owned column range of y */
            for (i = 0; i < num_rows; i++)
            {
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  j = A_j[jj];
                  if (j > ns && j < ne)
                     y_data[j] += A_data[jj] * x_data[i];
               }
            }
         }
         else
         {
            /* multivector case: same scatter, one pass per vector jv */
            for (i = 0; i < num_rows; i++)
            {
               for ( jv=0; jv<num_vectors; ++jv )
               {
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     j = A_j[jj];
                     if (j > ns && j < ne)
                        y_data[ j*idxstride_y + jv*vecstride_y ] +=
                           A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
                  }
               }
            }
         }
      }
   }
   else
   {
      /* single-threaded path: plain transpose scatter, no column filtering */
      for (i = 0; i < num_rows; i++)
      {
         if ( num_vectors==1 )
         {
            for (jj = A_i[i]; jj < A_i[i+1]; jj++)
            {
               j = A_j[jj];
               y_data[j] += A_data[jj] * x_data[i];
            }
         }
         else
         {
            for ( jv=0; jv<num_vectors; ++jv )
            {
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  j = A_j[jj];
                  y_data[ j*idxstride_y + jv*vecstride_y ] +=
                     A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
               }
            }
         }
      }
   }
   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
      for (i = 0; i < num_cols*num_vectors; i++)
         y_data[i] *= alpha;
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec_FF( double alpha,
                          hypre_CSRMatrix *A,
                          hypre_Vector   *x,
                          double          beta,
                          hypre_Vector   *y,
                          int            *CF_marker_x,
                          int            *CF_marker_y,
                          int             fpt )
{
   /* Computes y <- alpha*A*x + beta*y restricted to the "F-F" subproblem:
    * only rows i with CF_marker_x[i] == fpt are updated, and only matrix
    * entries whose column j has CF_marker_y[j] == fpt contribute.
    * Returns the usual informational size-mismatch code (1, 2, or 3);
    * mismatches do not stop processing. */
   double     *mat_vals = hypre_CSRMatrixData(A);
   int        *row_ptr  = hypre_CSRMatrixI(A);
   int        *col_idx  = hypre_CSRMatrixJ(A);
   int         nrows    = hypre_CSRMatrixNumRows(A);
   int         ncols    = hypre_CSRMatrixNumCols(A);
   double     *xv       = hypre_VectorData(x);
   double     *yv       = hypre_VectorData(y);
   int         xlen     = hypre_VectorSize(x);
   int         ylen     = hypre_VectorSize(y);
   double      scale, acc;
   int         row, p;
   int         ierr = 0;

   /* size compatibility checks (informational only) */
   if (ncols != xlen)
      ierr = 1;
   if (nrows != ylen)
      ierr = 2;
   if (ncols != xlen && nrows != ylen)
      ierr = 3;

   /* alpha == 0: only y <- beta*y on the marked rows */
   if (alpha == 0.0)
   {
      for (row = 0; row < nrows; row++)
         if (CF_marker_x[row] == fpt) yv[row] *= beta;
      return ierr;
   }

   /* y <- (beta/alpha)*y on the marked rows; final alpha scaling restores beta*y */
   scale = beta / alpha;
   if (scale != 1.0)
   {
      if (scale == 0.0)
      {
         for (row = 0; row < nrows; row++)
            if (CF_marker_x[row] == fpt) yv[row] = 0.0;
      }
      else
      {
         for (row = 0; row < nrows; row++)
            if (CF_marker_x[row] == fpt) yv[row] *= scale;
      }
   }

   /* y += A*x over marked rows, using only marked columns */
   for (row = 0; row < nrows; row++)
   {
      if (CF_marker_x[row] != fpt)
         continue;
      acc = yv[row];
      for (p = row_ptr[row]; p < row_ptr[row+1]; p++)
         if (CF_marker_y[col_idx[p]] == fpt) acc += mat_vals[p] * xv[col_idx[p]];
      yv[row] = acc;
   }

   /* y <- alpha*y on the marked rows */
   if (alpha != 1.0)
   {
      for (row = 0; row < nrows; row++)
         if (CF_marker_x[row] == fpt) yv[row] *= alpha;
   }
   return ierr;
}
|
wave_billiard.c | /*********************************************************************************/
/* */
/* Animation of wave equation in a planar domain */
/* */
/* N. Berglund, december 2012, may 2021 */
/* */
/* UPDATE 24/04: distinction between damping and "elasticity" parameters */
/* UPDATE 27/04: new billiard shapes, bug in color scheme fixed */
/* UPDATE 28/04: code made more efficient, with help of Marco Mancini */
/* */
/* Feel free to reuse, but if doing so it would be nice to drop a */
/* line to nils.berglund@univ-orleans.fr - Thanks! */
/* */
/* compile with */
/* gcc -o wave_billiard wave_billiard.c */
/* -L/usr/X11R6/lib -ltiff -lm -lGL -lGLU -lX11 -lXmu -lglut -O3 -fopenmp */
/* */
/* OMP acceleration may be more effective after executing */
/* export OMP_NUM_THREADS=2 in the shell before running the program */
/* */
/* To make a video, set MOVIE to 1 and create subfolder tif_wave */
/* It may be possible to increase parameter PAUSE */
/* */
/* create movie using */
/* ffmpeg -i wave.%05d.tif -vcodec libx264 wave.mp4 */
/* */
/*********************************************************************************/
/*********************************************************************************/
/* */
/* NB: The algorithm used to simulate the wave equation is highly paralellizable */
/* One could make it much faster by using a GPU */
/* */
/*********************************************************************************/
#include <math.h>
#include <string.h>
#include <GL/glut.h>
#include <GL/glu.h>
#include <unistd.h>
#include <sys/types.h>
#include <tiffio.h> /* Sam Leffler's libtiff library. */
#include <omp.h>
#define MOVIE 0 /* set to 1 to generate movie */
#define DOUBLE_MOVIE 0 /* set to 1 to produce movies for wave height and energy simultaneously */
/* General geometrical parameters */
#define WINWIDTH 1280 /* window width */
#define WINHEIGHT 720 /* window height */
#define NX 1280 /* number of grid points on x axis */
#define NY 720 /* number of grid points on y axis */
#define XMIN -2.0
#define XMAX 2.0 /* x interval */
#define YMIN -1.125
#define YMAX 1.125 /* y interval for 9/16 aspect ratio */
#define JULIA_SCALE 1.0 /* scaling for Julia sets */
/* Choice of the billiard table */
#define B_DOMAIN 38 /* choice of domain shape, see list in global_pdes.c */
#define CIRCLE_PATTERN 2 /* pattern of circles or polygons, see list in global_pdes.c */
#define P_PERCOL 0.25 /* probability of having a circle in C_RAND_PERCOL arrangement */
#define NPOISSON 300 /* number of points for Poisson C_RAND_POISSON arrangement */
#define RANDOM_POLY_ANGLE 1 /* set to 1 to randomize angle of polygons */
#define LAMBDA 0.9 /* parameter controlling the dimensions of domain */
#define MU 0.03 /* parameter controlling the dimensions of domain */
#define NPOLY 6 /* number of sides of polygon */
#define APOLY 2.0 /* angle by which to turn polygon, in units of Pi/2 */
#define MDEPTH 5 /* depth of computation of Menger gasket */
#define MRATIO 3 /* ratio defining Menger gasket */
#define MANDELLEVEL 1000 /* iteration level for Mandelbrot set */
#define MANDELLIMIT 10.0 /* limit value for approximation of Mandelbrot set */
#define FOCI 1 /* set to 1 to draw focal points of ellipse */
#define NGRIDX 10 /* number of grid point for grid of disks */
#define NGRIDY 12 /* number of grid point for grid of disks */
// #define NGRIDX 16 /* number of grid point for grid of disks */
// #define NGRIDY 20 /* number of grid point for grid of disks */
#define X_SHOOTER -0.2
#define Y_SHOOTER -0.6
#define X_TARGET 0.4
#define Y_TARGET 0.7 /* shooter and target positions in laser fight */
#define ISO_XSHIFT_LEFT -2.9
#define ISO_XSHIFT_RIGHT 1.4
#define ISO_YSHIFT_LEFT -0.15
#define ISO_YSHIFT_RIGHT -0.15
#define ISO_SCALE 0.5 /* coordinates for isospectral billiards */
/* You can add more billiard tables by adapting the functions */
/* xy_in_billiard and draw_billiard below */
/* Physical parameters of wave equation */
#define TWOSPEEDS 0 /* set to 1 to replace hardcore boundary by medium with different speed */
#define OSCILLATE_LEFT 0 /* set to 1 to add oscilating boundary condition on the left */
#define OSCILLATE_TOPBOT 0 /* set to 1 to enforce a planar wave on top and bottom boundary */
#define OMEGA 0.002 /* frequency of periodic excitation */
#define AMPLITUDE 1.0 /* amplitude of periodic excitation */
#define COURANT 0.02 /* Courant number */
#define COURANTB 0.01 /* Courant number in medium B */
#define GAMMA 0.0 /* damping factor in wave equation */
// #define GAMMAB 5.0e-3 /* damping factor in wave equation */
// #define GAMMAB 1.0e-2 /* damping factor in wave equation */
#define GAMMAB 1.0e-6 /* damping factor in wave equation */
#define GAMMA_SIDES 1.0e-4 /* damping factor on boundary */
#define GAMMA_TOPBOT 1.0e-7 /* damping factor on boundary */
#define KAPPA 0.0 /* "elasticity" term enforcing oscillations */
#define KAPPA_SIDES 5.0e-4 /* "elasticity" term on absorbing boundary */
#define KAPPA_TOPBOT 0.0 /* "elasticity" term on absorbing boundary */
/* The Courant number is given by c*DT/DX, where DT is the time step and DX the lattice spacing */
/* The physical damping coefficient is given by GAMMA/(DT)^2 */
/* Increasing COURANT speeds up the simulation, but decreases accuracy */
/* For similar wave forms, COURANT^2*GAMMA should be kept constant */
/* Boundary conditions, see list in global_pdes.c */
#define B_COND 3
/* Parameters for length and speed of simulation */
// #define NSTEPS 500 /* number of frames of movie */
#define NSTEPS 1500 /* number of frames of movie */
#define NVID 40 /* number of iterations between images displayed on screen */
#define NSEG 100 /* number of segments of boundary */
#define INITIAL_TIME 0 /* time after which to start saving frames */
#define BOUNDARY_WIDTH 2 /* width of billiard boundary */
#define PAUSE 1000 /* number of frames after which to pause */
#define PSLEEP 1 /* sleep time during pause */
#define SLEEP1 1 /* initial sleeping time */
#define SLEEP2 1 /* final sleeping time */
#define MID_FRAMES 20 /* number of still frames between parts of two-part movie */
#define END_FRAMES 50 /* number of still frames at end of movie */
/* Parameters of initial condition */
#define INITIAL_AMP 0.75 /* amplitude of initial condition */
// #define INITIAL_VARIANCE 0.0003 /* variance of initial condition */
// #define INITIAL_WAVELENGTH 0.015 /* wavelength of initial condition */
#define INITIAL_VARIANCE 0.0003 /* variance of initial condition */
#define INITIAL_WAVELENGTH 0.015 /* wavelength of initial condition */
/* Plot type, see list in global_pdes.c */
#define PLOT 1
#define PLOT_B 0 /* plot type for second movie */
/* Color schemes */
#define COLOR_PALETTE 14 /* Color palette, see list in global_pdes.c */
#define BLACK 1 /* background */
#define COLOR_SCHEME 3 /* choice of color scheme, see list in global_pdes.c */
#define SCALE 0 /* set to 1 to adjust color scheme to variance of field */
// #define SLOPE 0.25 /* sensitivity of color on wave amplitude */
#define SLOPE 1.0 /* sensitivity of color on wave amplitude */
#define ATTENUATION 0.0 /* exponential attenuation coefficient of contrast with time */
// #define E_SCALE 150.0 /* scaling factor for energy representation */
#define E_SCALE 100.0 /* scaling factor for energy representation */
#define COLORHUE 260 /* initial hue of water color for scheme C_LUM */
#define COLORDRIFT 0.0 /* how much the color hue drifts during the whole simulation */
#define LUMMEAN 0.5 /* amplitude of luminosity variation for scheme C_LUM */
#define LUMAMP 0.3 /* amplitude of luminosity variation for scheme C_LUM */
#define HUEMEAN 180.0 /* mean value of hue for color scheme C_HUE */
// #define HUEMEAN 210.0 /* mean value of hue for color scheme C_HUE */
#define HUEAMP -180.0 /* amplitude of variation of hue for color scheme C_HUE */
// #define HUEAMP -180.0 /* amplitude of variation of hue for color scheme C_HUE */
#define DRAW_COLOR_SCHEME 1 /* set to 1 to plot the color scheme */
#define COLORBAR_RANGE 2.0 /* scale of color scheme bar */
#define COLORBAR_RANGE_B 12.0 /* scale of color scheme bar for 2nd part */
#define ROTATE_COLOR_SCHEME 1 /* set to 1 to draw color scheme horizontally */
#define SAVE_TIME_SERIES 0 /* set to 1 to save wave time series at a point */
/* For debugging purposes only */
#define FLOOR 0 /* set to 1 to limit wave amplitude to VMAX */
#define VMAX 10.0 /* max value of wave amplitude */
#include "global_pdes.c" /* constants and global variables */
#include "sub_wave.c" /* common functions for wave_billiard, heat and schrodinger */
#include "wave_common.c" /* common functions for wave_billiard, wave_comparison, etc */
FILE *time_series_left, *time_series_right;
double courant2, courantb2; /* Courant parameters squared */
/*********************/
/* animation part */
/*********************/
void evolve_wave_half_old(double *phi_in[NX], double *psi_in[NX], double *phi_out[NX], double *psi_out[NX],
                          short int *xy_in[NX])
/* time step of field evolution */
/* phi is value of field at time t, psi at time t-1 */
/* xy_in[i][j]: 0 = outside the billiard domain, 1 = inside, other nonzero = second medium */
{
    int i, j, iplus, iminus, jplus, jminus;
    double delta, x, y, c, cc, gamma;
    static long time = 0;   /* frame counter; sets the phase of the oscillating boundary */

    time++;

//     c = COURANT;
//     cc = courant2;

    #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta,x,y,c,cc,gamma)
    for (i=0; i<NX; i++){
        for (j=0; j<NY; j++){
//             if (xy_in[i][j])
//             {
//                 c = COURANT;
//                 cc = courant2;
//                 gamma = GAMMA;
//             }
            /* pick wave speed and damping for this cell; when neither branch
             * fires (outside the domain and !TWOSPEEDS), c/cc/gamma stay unset
             * but the update block below is skipped, so they are never read */
            if (xy_in[i][j] != 0)
            {
                c = COURANT;
                cc = courant2;
                if (xy_in[i][j] == 1) gamma = GAMMA;
                else gamma = GAMMAB;
            }
            else if (TWOSPEEDS)
            {
                c = COURANTB;
                cc = courantb2;
                gamma = GAMMAB;
            }

            if ((TWOSPEEDS)||(xy_in[i][j] != 0)){
                /* discretized Laplacian for various boundary conditions:
                 * neighbor indices are clamped (Dirichlet/absorbing) or
                 * wrapped (periodic / vertically periodic) at the edges */
                if ((B_COND == BC_DIRICHLET)||(B_COND == BC_ABSORBING))
                {
                    iplus = (i+1);   if (iplus == NX) iplus = NX-1;
                    iminus = (i-1);  if (iminus == -1) iminus = 0;
                    jplus = (j+1);   if (jplus == NY) jplus = NY-1;
                    jminus = (j-1);  if (jminus == -1) jminus = 0;
                }
                else if (B_COND == BC_PERIODIC)
                {
                    iplus = (i+1) % NX;
                    iminus = (i-1) % NX;
                    if (iminus < 0) iminus += NX;
                    jplus = (j+1) % NY;
                    jminus = (j-1) % NY;
                    if (jminus < 0) jminus += NY;
                }
                else if (B_COND == BC_VPER_HABS)
                {
                    /* clamp horizontally, wrap vertically */
                    iplus = (i+1);   if (iplus == NX) iplus = NX-1;
                    iminus = (i-1);  if (iminus == -1) iminus = 0;
                    jplus = (j+1) % NY;
                    jminus = (j-1) % NY;
                    if (jminus < 0) jminus += NY;
                }

                /* imposing linear wave on top and bottom by making Laplacian 1d */
                if (OSCILLATE_TOPBOT)
                {
                    if (j == NY-1) jminus = NY-1;
                    else if (j == 0) jplus = 0;
                }

                /* 5-point Laplacian stencil */
                delta = phi_in[iplus][j] + phi_in[iminus][j] + phi_in[i][jplus] + phi_in[i][jminus] - 4.0*phi_in[i][j];
                x = phi_in[i][j];
                y = psi_in[i][j];

                /* evolve phi: leapfrog update with elasticity (KAPPA) and damping (gamma) */
                if ((B_COND == BC_PERIODIC)||(B_COND == BC_DIRICHLET))
                    phi_out[i][j] = -y + 2*x + cc*delta - KAPPA*x - gamma*(x-y);
                else if (B_COND == BC_ABSORBING)
                {
                    if ((i>0)&&(i<NX-1)&&(j>0)&&(j<NY-1))
                        phi_out[i][j] = -y + 2*x + cc*delta - KAPPA*x - gamma*(x-y);
                    /* upper border: 1d outgoing-wave (advection) update */
                    else if (j==NY-1)
                        phi_out[i][j] = x - c*(x - phi_in[i][NY-2]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y);
                    /* lower border */
                    else if (j==0)
                        phi_out[i][j] = x - c*(x - phi_in[i][1]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y);
                    /* right border -- NOTE(review): this starts a NEW if-chain
                     * (plain `if`, not `else if`), so at the corners the
                     * side-border update overwrites the top/bottom one;
                     * looks intentional but worth confirming */
                    if (i==NX-1)
                        phi_out[i][j] = x - c*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
                    /* left border */
                    else if (i==0)
                        phi_out[i][j] = x - c*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
                }
                else if (B_COND == BC_VPER_HABS)
                {
                    /* vertical wrap already handled by the Laplacian; only the
                     * left/right borders get the absorbing update */
                    if ((i>0)&&(i<NX-1))
                        phi_out[i][j] = -y + 2*x + cc*delta - KAPPA*x - gamma*(x-y);
                    /* right border */
                    else if (i==NX-1)
                        phi_out[i][j] = x - c*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
                    /* left border */
                    else if (i==0)
                        phi_out[i][j] = x - c*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
                }

                /* shift time levels: psi becomes the old phi */
                psi_out[i][j] = x;

                /* add oscillating boundary condition on the left */
                if ((i == 0)&&(OSCILLATE_LEFT)) phi_out[i][j] = AMPLITUDE*cos((double)time*OMEGA);
//                 psi_out[i][j] = x;

                /* optional amplitude clipping for debugging runaway solutions */
                if (FLOOR)
                {
                    if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX;
                    if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX;
                    if (psi_out[i][j] > VMAX) psi_out[i][j] = VMAX;
                    if (psi_out[i][j] < -VMAX) psi_out[i][j] = -VMAX;
                }
            }
        }
    }
//     printf("phi(0,0) = %.3lg, psi(0,0) = %.3lg\n", phi[NX/2][NY/2], psi[NX/2][NY/2]);
}
void evolve_wave_half(double *phi_in[NX], double *psi_in[NX], double *phi_out[NX], double *psi_out[NX],
short int *xy_in[NX])
/* time step of field evolution */
/* phi is value of field at time t, psi at time t-1 */
/* this version of the function has been rewritten in order to minimize the number of if-branches */
{
int i, j, iplus, iminus, jplus, jminus;
double delta, x, y, c, cc, gamma;
static long time = 0;
/* per-cell Courant number (tc), its square (tcc) and damping coefficient (tgamma);
   computed once on the first call and cached in static tables for all later calls */
static double tc[NX][NY], tcc[NX][NY], tgamma[NX][NY];
static short int first = 1;
time++;
/* initialize tables with wave speeds and dissipation */
if (first)
{
for (i=0; i<NX; i++){
for (j=0; j<NY; j++){
if (xy_in[i][j] != 0)
{
tc[i][j] = COURANT;
tcc[i][j] = courant2;
/* xy_in encodes the medium: value 1 uses GAMMA, other nonzero values GAMMAB */
if (xy_in[i][j] == 1) tgamma[i][j] = GAMMA;
else tgamma[i][j] = GAMMAB;
}
else if (TWOSPEEDS)
{
/* cells outside the domain form a second medium with its own speed/damping */
tc[i][j] = COURANTB;
tcc[i][j] = courantb2;
tgamma[i][j] = GAMMAB;
}
}
}
first = 0;
}
/* NOTE: the pragma parallelizes only the bulk double loop just below;
   all boundary loops further down run serially */
#pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta,x,y)
/* evolution in the bulk */
for (i=1; i<NX-1; i++){
for (j=1; j<NY-1; j++){
if ((TWOSPEEDS)||(xy_in[i][j] != 0)){
x = phi_in[i][j];
y = psi_in[i][j];
/* discretized Laplacian */
delta = phi_in[i+1][j] + phi_in[i-1][j] + phi_in[i][j+1] + phi_in[i][j-1] - 4.0*x;
/* evolve phi */
phi_out[i][j] = -y + 2*x + tcc[i][j]*delta - KAPPA*x - tgamma[i][j]*(x-y);
psi_out[i][j] = x;
}
}
}
/* left boundary */
if (OSCILLATE_LEFT) for (j=1; j<NY-1; j++) phi_out[0][j] = AMPLITUDE*cos((double)time*OMEGA);
else for (j=1; j<NY-1; j++){
if ((TWOSPEEDS)||(xy_in[0][j] != 0)){
x = phi_in[0][j];
y = psi_in[0][j];
switch (B_COND) {
case (BC_DIRICHLET):
{
/* 3-point Laplacian: missing left neighbour treated as zero-field wall */
delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x;
phi_out[0][j] = -y + 2*x + tcc[0][j]*delta - KAPPA*x - tgamma[0][j]*(x-y);
break;
}
case (BC_PERIODIC):
{
/* wrap around: left neighbour is column NX-1 */
delta = phi_in[1][j] + phi_in[NX-1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 4.0*x;
phi_out[0][j] = -y + 2*x + tcc[0][j]*delta - KAPPA*x - tgamma[0][j]*(x-y);
break;
}
case (BC_ABSORBING):
{
/* one-way (outgoing) update; note delta is computed but unused in this case */
delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x;
phi_out[0][j] = x - tc[0][j]*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
break;
}
case (BC_VPER_HABS):
{
/* vertical-periodic / horizontal-absorbing: sides absorb */
delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x;
phi_out[0][j] = x - tc[0][j]*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
break;
}
}
psi_out[0][j] = x;
}
}
/* right boundary */
for (j=1; j<NY-1; j++){
if ((TWOSPEEDS)||(xy_in[NX-1][j] != 0)){
x = phi_in[NX-1][j];
y = psi_in[NX-1][j];
switch (B_COND) {
case (BC_DIRICHLET):
{
delta = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x;
phi_out[NX-1][j] = -y + 2*x + tcc[NX-1][j]*delta - KAPPA*x - tgamma[NX-1][j]*(x-y);
break;
}
case (BC_PERIODIC):
{
delta = phi_in[NX-2][j] + phi_in[0][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 4.0*x;
phi_out[NX-1][j] = -y + 2*x + tcc[NX-1][j]*delta - KAPPA*x - tgamma[NX-1][j]*(x-y);
break;
}
case (BC_ABSORBING):
{
delta = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x;
phi_out[NX-1][j] = x - tc[NX-1][j]*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
break;
}
case (BC_VPER_HABS):
{
delta = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x;
phi_out[NX-1][j] = x - tc[NX-1][j]*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
break;
}
}
psi_out[NX-1][j] = x;
}
}
/* top boundary */
for (i=0; i<NX; i++){
if ((TWOSPEEDS)||(xy_in[i][NY-1] != 0)){
x = phi_in[i][NY-1];
y = psi_in[i][NY-1];
switch (B_COND) {
case (BC_DIRICHLET):
{
/* clamp horizontal neighbours at the corners */
iplus = i+1; if (iplus == NX) iplus = NX-1;
iminus = i-1; if (iminus == -1) iminus = 0;
delta = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] - 3.0*x;
phi_out[i][NY-1] = -y + 2*x + tcc[i][NY-1]*delta - KAPPA*x - tgamma[i][NY-1]*(x-y);
break;
}
case (BC_PERIODIC):
{
iplus = (i+1) % NX;
iminus = (i-1) % NX;
if (iminus < 0) iminus += NX;
delta = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] + phi_in[i][0] - 4.0*x;
phi_out[i][NY-1] = -y + 2*x + tcc[i][NY-1]*delta - KAPPA*x - tgamma[i][NY-1]*(x-y);
break;
}
case (BC_ABSORBING):
{
iplus = (i+1); if (iplus == NX) iplus = NX-1;
iminus = (i-1); if (iminus == -1) iminus = 0;
delta = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] - 3.0*x;
phi_out[i][NY-1] = x - tc[i][NY-1]*(x - phi_in[i][NY-2]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y);
break;
}
case (BC_VPER_HABS):
{
/* NOTE(review): only the i==0 corner gets the absorbing side update here;
   i==NX-1 falls through to the periodic-style formula — confirm intended */
iplus = (i+1); if (iplus == NX) iplus = NX-1;
iminus = (i-1); if (iminus == -1) iminus = 0;
delta = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] + phi_in[i][0] - 4.0*x;
if (i==0) phi_out[0][NY-1] = x - tc[0][NY-1]*(x - phi_in[1][NY-1]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
else phi_out[i][NY-1] = -y + 2*x + tcc[i][NY-1]*delta - KAPPA*x - tgamma[i][NY-1]*(x-y);
break;
}
}
psi_out[i][NY-1] = x;
}
}
/* bottom boundary */
for (i=0; i<NX; i++){
if ((TWOSPEEDS)||(xy_in[i][0] != 0)){
x = phi_in[i][0];
y = psi_in[i][0];
switch (B_COND) {
case (BC_DIRICHLET):
{
iplus = i+1; if (iplus == NX) iplus = NX-1;
iminus = i-1; if (iminus == -1) iminus = 0;
delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] - 3.0*x;
phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y);
break;
}
case (BC_PERIODIC):
{
iplus = (i+1) % NX;
iminus = (i-1) % NX;
if (iminus < 0) iminus += NX;
delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] + phi_in[i][NY-1] - 4.0*x;
phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y);
break;
}
case (BC_ABSORBING):
{
iplus = (i+1); if (iplus == NX) iplus = NX-1;
iminus = (i-1); if (iminus == -1) iminus = 0;
delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] - 3.0*x;
phi_out[i][0] = x - tc[i][0]*(x - phi_in[i][1]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y);
break;
}
case (BC_VPER_HABS):
{
iplus = (i+1); if (iplus == NX) iplus = NX-1;
iminus = (i-1); if (iminus == -1) iminus = 0;
delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] + phi_in[i][NY-1] - 4.0*x;
if (i==0) phi_out[0][0] = x - tc[0][0]*(x - phi_in[1][0]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
else phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y);
break;
}
}
psi_out[i][0] = x;
}
}
/* add oscillating boundary condition on the left corners */
if (OSCILLATE_LEFT)
{
phi_out[0][0] = AMPLITUDE*cos((double)time*OMEGA);
phi_out[0][NY-1] = AMPLITUDE*cos((double)time*OMEGA);
}
/* for debugging purposes/if there is a risk of blow-up */
/* clamp both fields to [-VMAX, VMAX] inside the domain */
if (FLOOR) for (i=0; i<NX; i++){
for (j=0; j<NY; j++){
if (xy_in[i][j] != 0)
{
if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX;
if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX;
if (psi_out[i][j] > VMAX) psi_out[i][j] = VMAX;
if (psi_out[i][j] < -VMAX) psi_out[i][j] = -VMAX;
}
}
}
}
void evolve_wave(double *phi[NX], double *psi[NX], double *phi_tmp[NX], double *psi_tmp[NX], short int *xy_in[NX])
/* one full time step of the field evolution, performed as two half steps */
/* phi is the field value at time t, psi the value at time t-1 */
{
    /* first half step writes into the temporaries ... */
    evolve_wave_half(phi, psi, phi_tmp, psi_tmp, xy_in);
    /* ... second half step writes back into phi/psi, so no final copy is needed */
    evolve_wave_half(phi_tmp, psi_tmp, phi, psi, xy_in);
}
void draw_color_bar(int plot, double range)
/* draw the color scale legend, horizontal or vertical depending on ROTATE_COLOR_SCHEME */
{
    if (!ROTATE_COLOR_SCHEME)
        draw_color_scheme(1.7, YMIN + 0.1, 1.9, YMAX - 0.1, plot, -range, range);
    else
        draw_color_scheme(-1.0, -0.8, XMAX - 0.1, -1.0, plot, -range, range);
}
/* Main animation driver: allocates the field arrays, initializes the wave,
   runs NSTEPS evolution/draw cycles, optionally saves movie frames and a
   time series, then frees everything. */
void animation()
{
double time, scale, ratio, startleft[2], startright[2];
double *phi[NX], *psi[NX], *phi_tmp[NX], *psi_tmp[NX], *total_energy[NX];
short int *xy_in[NX];
int i, j, s, sample_left[2], sample_right[2];
static int counter = 0;
long int wave_value;
if (SAVE_TIME_SERIES)
{
time_series_left = fopen("wave_left.dat", "w");
time_series_right = fopen("wave_right.dat", "w");
}
/* Since NX and NY are big, it seemed wiser to use some memory allocation here */
/* NOTE(review): malloc results are not checked; a failed allocation crashes later */
for (i=0; i<NX; i++)
{
phi[i] = (double *)malloc(NY*sizeof(double));
psi[i] = (double *)malloc(NY*sizeof(double));
phi_tmp[i] = (double *)malloc(NY*sizeof(double));
psi_tmp[i] = (double *)malloc(NY*sizeof(double));
total_energy[i] = (double *)malloc(NY*sizeof(double));
xy_in[i] = (short int *)malloc(NY*sizeof(short int));
}
/* initialise positions and radii of circles */
if ((B_DOMAIN == D_CIRCLES)||(B_DOMAIN == D_CIRCLES_IN_RECT)) init_circle_config(circles);
else if (B_DOMAIN == D_POLYGONS) init_polygon_config(polygons);
printf("Polygons initialized\n");
/* initialise polyline for von Koch and simular domains */
npolyline = init_polyline(MDEPTH, polyline);
for (i=0; i<npolyline; i++) printf("vertex %i: (%.3f, %.3f)\n", i, polyline[i].x, polyline[i].y);
/* cache the squared Courant numbers used by evolve_wave_half */
courant2 = COURANT*COURANT;
courantb2 = COURANTB*COURANTB;
/* initialize wave with a drop at one point, zero elsewhere */
// init_circular_wave(0.0, -LAMBDA, phi, psi, xy_in);
/* initialize total energy table */
if ((PLOT == P_MEAN_ENERGY)||(PLOT_B == P_MEAN_ENERGY))
for (i=0; i<NX; i++)
for (j=0; j<NY; j++)
total_energy[i][j] = 0.0;
/* NOTE(review): ratio is only referenced by commented-out code below */
ratio = (XMAX - XMIN)/8.4; /* for Tokarsky billiard */
// isospectral_initial_point(0.2, 0.0, startleft, startright); /* for isospectral billiards */
homophonic_initial_point(0.5, -0.25, 1.5, -0.25, startleft, startright);
// homophonic_initial_point(0.5, -0.25, 1.5, -0.25, startleft, startright);
// printf("xleft = (%.3f, %.3f) xright = (%.3f, %.3f)\n", startleft[0], startleft[1], startright[0], startright[1]);
/* grid indices where the time series are sampled */
xy_to_ij(startleft[0], startleft[1], sample_left);
xy_to_ij(startright[0], startright[1], sample_right);
// printf("xleft = (%.3f, %.3f) xright = (%.3f, %.3f)\n", xin_left, yin_left, xin_right, yin_right);
// init_wave_flat(phi, psi, xy_in);
// init_wave_plus(LAMBDA - 0.3*MU, 0.5*MU, phi, psi, xy_in);
// init_wave(LAMBDA - 0.3*MU, 0.5*MU, phi, psi, xy_in);
// init_circular_wave(X_SHOOTER, Y_SHOOTER, phi, psi, xy_in);
// printf("Initializing wave\n");
// init_circular_wave(-1.0, 0.0, phi, psi, xy_in);
// printf("Wave initialized\n");
// init_circular_wave(0.0, 0.0, phi, psi, xy_in);
// add_circular_wave(-1.0, 0.0, LAMBDA, phi, psi, xy_in);
// add_circular_wave(1.0, -LAMBDA, 0.0, phi, psi, xy_in);
// add_circular_wave(-1.0, 0.0, -LAMBDA, phi, psi, xy_in);
init_circular_wave_xplusminus(startleft[0], startleft[1], startright[0], startright[1], phi, psi, xy_in);
// init_circular_wave_xplusminus(-0.9, 0.0, 0.81, 0.0, phi, psi, xy_in);
// init_circular_wave(-2.0*ratio, 0.0, phi, psi, xy_in);
// init_planar_wave(XMIN + 0.015, 0.0, phi, psi, xy_in);
// init_planar_wave(XMIN + 0.02, 0.0, phi, psi, xy_in);
// init_planar_wave(XMIN + 0.5, 0.0, phi, psi, xy_in);
// init_wave(-1.5, 0.0, phi, psi, xy_in);
// init_wave(0.0, 0.0, phi, psi, xy_in);
/* add a drop at another point */
// add_drop_to_wave(1.0, 0.7, 0.0, phi, psi);
// add_drop_to_wave(1.0, -0.7, 0.0, phi, psi);
// add_drop_to_wave(1.0, 0.0, -0.7, phi, psi);
/* draw the initial frame */
blank();
glColor3f(0.0, 0.0, 0.0);
// draw_wave(phi, psi, xy_in, 1.0, 0, PLOT);
draw_wave_e(phi, psi, total_energy, xy_in, 1.0, 0, PLOT);
draw_billiard();
if (DRAW_COLOR_SCHEME) draw_color_bar(PLOT, COLORBAR_RANGE);
glutSwapBuffers();
sleep(SLEEP1);
/* main loop: one drawn frame per iteration, NVID evolution steps per frame */
for (i=0; i<=INITIAL_TIME + NSTEPS; i++)
{
//printf("%d\n",i);
/* compute the variance of the field to adjust color scheme */
/* the color depends on the field divided by sqrt(1 + variance) */
if (SCALE)
{
scale = sqrt(1.0 + compute_variance(phi,psi, xy_in));
// printf("Scaling factor: %5lg\n", scale);
}
else scale = 1.0;
// draw_wave(phi, psi, xy_in, scale, i, PLOT);
draw_wave_e(phi, psi, total_energy, xy_in, scale, i, PLOT);
for (j=0; j<NVID; j++)
{
evolve_wave(phi, psi, phi_tmp, psi_tmp, xy_in);
if (SAVE_TIME_SERIES)
{
/* samples are written as fixed-width scaled integers */
wave_value = (long int)(phi[sample_left[0]][sample_left[1]]*1.0e16);
fprintf(time_series_left, "%019ld\n", wave_value);
wave_value = (long int)(phi[sample_right[0]][sample_right[1]]*1.0e16);
fprintf(time_series_right, "%019ld\n", wave_value);
if ((j == 0)&&(i%10 == 0)) printf("Frame %i of %i\n", i, NSTEPS);
// fprintf(time_series_right, "%.15f\n", phi[sample_right[0]][sample_right[1]]);
}
// if (i % 10 == 9) oscillate_linear_wave(0.2*scale, 0.15*(double)(i*NVID + j), -1.5, YMIN, -1.5, YMAX, phi, psi);
}
draw_billiard();
if (DRAW_COLOR_SCHEME) draw_color_bar(PLOT, COLORBAR_RANGE);
/* add oscillating waves */
// if (i%345 == 344)
// {
// add_circular_wave(1.0, -LAMBDA, 0.0, phi, psi, xy_in);
// }
glutSwapBuffers();
if (MOVIE)
{
/* frames before INITIAL_TIME are simulated but not saved */
if (i >= INITIAL_TIME) save_frame();
else printf("Initial phase time %i of %i\n", i, INITIAL_TIME);
if ((i >= INITIAL_TIME)&&(DOUBLE_MOVIE))
{
/* second rendering of the same state with the alternate plot type */
// draw_wave(phi, psi, xy_in, scale, i, PLOT_B);
draw_wave_e(phi, psi, total_energy, xy_in, scale, i, PLOT_B);
if (DRAW_COLOR_SCHEME) draw_color_bar(PLOT_B, COLORBAR_RANGE_B);
draw_billiard();
glutSwapBuffers();
save_frame_counter(NSTEPS + 21 + counter);
counter++;
}
/* it seems that saving too many files too fast can cause trouble with the file system */
/* so this is to make a pause from time to time - parameter PAUSE may need adjusting */
if (i % PAUSE == PAUSE - 1)
{
printf("Making a short pause\n");
sleep(PSLEEP);
s = system("mv wave*.tif tif_wave/");
}
}
}
/* append hold frames of the final state at the end of the movie */
if (MOVIE)
{
if (DOUBLE_MOVIE)
{
// draw_wave(phi, psi, xy_in, scale, i, PLOT);
draw_wave_e(phi, psi, total_energy, xy_in, scale, NSTEPS, PLOT);
draw_billiard();
if (DRAW_COLOR_SCHEME) draw_color_bar(PLOT, COLORBAR_RANGE);
glutSwapBuffers();
}
for (i=0; i<MID_FRAMES; i++) save_frame();
if (DOUBLE_MOVIE)
{
// draw_wave(phi, psi, xy_in, scale, i, PLOT_B);
draw_wave_e(phi, psi, total_energy, xy_in, scale, NSTEPS, PLOT_B);
draw_billiard();
if (DRAW_COLOR_SCHEME) draw_color_bar(PLOT_B, COLORBAR_RANGE_B);
glutSwapBuffers();
}
for (i=0; i<END_FRAMES; i++) save_frame_counter(NSTEPS + MID_FRAMES + 1 + counter + i);
s = system("mv wave*.tif tif_wave/");
}
/* release all per-column buffers */
for (i=0; i<NX; i++)
{
free(phi[i]);
free(psi[i]);
free(phi_tmp[i]);
free(psi_tmp[i]);
free(total_energy[i]);
free(xy_in[i]);
}
if (SAVE_TIME_SERIES)
{
fclose(time_series_left);
fclose(time_series_right);
}
}
/* GLUT display callback: clear both buffers, run the whole animation,
   pause, then destroy the window (which ends the program). */
void display(void)
{
glPushMatrix();
/* blank/swap twice so both buffers of the double buffer start cleared */
blank();
glutSwapBuffers();
blank();
glutSwapBuffers();
animation();
sleep(SLEEP2);
glPopMatrix();
glutDestroyWindow(glutGetWindow());
}
/* Program entry point: set up the GLUT window and hand control to the
   display callback, which runs the simulation. */
int main(int argc, char** argv)
{
glutInit(&argc, argv);
/* double-buffered RGB with depth buffer */
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(WINWIDTH,WINHEIGHT);
glutCreateWindow("Wave equation in a planar domain");
init();
glutDisplayFunc(display);
glutMainLoop();
return 0;
}
|
GB_unaryop__identity_uint16_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_int32
// op(A') function: GB_tran__identity_uint16_int32
// C type: uint16_t
// A type: int32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Apply the identity operator with typecast: Cx [p] = (uint16_t) Ax [p]
   for all anz entries, parallelized over nthreads (auto-generated code;
   GB_CAST_OP expands to the GETA/CASTING/OP macros defined above). */
GrB_Info GB_unop__identity_uint16_int32
(
uint16_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = (uint16_t) A': transpose with typecast and identity operator.
   The actual loop lives in the GB_unaryop_transpose.c template, driven by
   the GB_* macros defined above (auto-generated code). */
GrB_Info GB_tran__identity_uint16_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
parallelReadZarr.c | #include <stdio.h>
#include <stdint.h>
#include "cBlosc2/include/blosc2.h"
#include "cJSON/include/cjson/cJSON.h"
#include <omp.h>
#include <stdlib.h>
#include "mex.h"
//mex -v COPTIMFLAGS="-O3 -fwrapv -DNDEBUG" CFLAGS='$CFLAGS -O3 -fopenmp' LDFLAGS='$LDFLAGS -O3 -fopenmp' '-I/global/home/groups/software/sl-7.x86_64/modules/cBlosc/2.0.4/include/' '-I/global/home/groups/software/sl-7.x86_64/modules/cJSON/1.7.15/include/' '-L/global/home/groups/software/sl-7.x86_64/modules/cBlosc/2.0.4/lib64' -lblosc2 -L/global/home/groups/software/sl-7.x86_64/modules/cJSON/1.7.15/lib64' -lcjson zarrMex.c
/* platform-specific path separator used when building chunk and .zarray file paths */
const char fileSep =
#ifdef _WIN32
'\\';
#else
'/';
#endif
/* Allocate a buffer of x elements whose element size is selected by the
 * bit width of the image data: 8/16-bit unsigned integer, 32-bit float or
 * 64-bit double.  Unsupported widths warn and fall back to single precision
 * (same behavior as before).  Returns NULL if the requested size would
 * overflow size_t (new guard) or if malloc itself fails. */
void* mallocDynamic(uint64_t x, uint64_t bits){
    size_t elemSize;
    switch(bits){
        case 8:
            elemSize = sizeof(uint8_t);
            break;
        case 16:
            elemSize = sizeof(uint16_t);
            break;
        case 32:
            elemSize = sizeof(float);
            break;
        case 64:
            elemSize = sizeof(double);
            break;
        default:
            printf("Image is not 8/16 bit, single, or double. Using single.");
            elemSize = sizeof(float);
            break;
    }
    /* guard against multiplication overflow before allocating */
    if(x > SIZE_MAX/elemSize) return NULL;
    return malloc((size_t)x*elemSize);
}
/* List of chunk file names and how many there are. */
struct chunkInfo{
    char** chunkNames;
    int64_t numChunks;
};
/* Per-axis chunk indices parsed from a chunk file name "x.y.z". */
struct chunkAxisVals{
    uint64_t x;
    uint64_t y;
    uint64_t z;
};
/* Parse a zarr chunk file name of the form "x.y.z" into its three axis
 * indices.  Uses strtoull so the full uint64_t range is preserved: the
 * previous strtol is signed and returns long, which truncates/saturates
 * large indices (long is 32-bit on Windows). */
struct chunkAxisVals getChunkAxisVals(char* fileName){
    struct chunkAxisVals cAV;
    char* ptr;
    cAV.x = strtoull(fileName, &ptr, 10);
    ptr++; /* skip the '.' separator */
    cAV.y = strtoull(ptr, &ptr, 10);
    ptr++;
    cAV.z = strtoull(ptr, &ptr, 10);
    return cAV;
}
struct chunkInfo getChunkInfo(char* folderName, uint64_t startX, uint64_t startY, uint64_t startZ, uint64_t endX, uint64_t endY,uint64_t endZ,uint64_t chunkXSize,uint64_t chunkYSize,uint64_t chunkZSize){
struct chunkInfo cI;
cI.numChunks = 0;
cI.chunkNames = NULL;
uint64_t xStartAligned = startX-(startX%chunkXSize);
uint64_t yStartAligned = startY-(startY%chunkYSize);
uint64_t zStartAligned = startZ-(startZ%chunkZSize);
uint64_t xStartChunk = (xStartAligned/chunkXSize);
uint64_t yStartChunk = (yStartAligned/chunkYSize);
uint64_t zStartChunk = (zStartAligned/chunkZSize);
uint64_t xEndAligned = endX;
uint64_t yEndAligned = endY;
uint64_t zEndAligned = endZ;
if(xEndAligned%chunkXSize) xEndAligned = endX-(endX%chunkXSize)+chunkXSize;
if(yEndAligned%chunkYSize) yEndAligned = endY-(endY%chunkYSize)+chunkYSize;
if(zEndAligned%chunkZSize) zEndAligned = endZ-(endZ%chunkZSize)+chunkZSize;
uint64_t xEndChunk = (xEndAligned/chunkXSize);
uint64_t yEndChunk = (yEndAligned/chunkYSize);
uint64_t zEndChunk = (zEndAligned/chunkZSize);
uint64_t xChunks = (xEndChunk-xStartChunk);
uint64_t yChunks = (yEndChunk-yStartChunk);
uint64_t zChunks = (zEndChunk-zStartChunk);
uint64_t file_count = xChunks*yChunks*zChunks;
char** chunkNames = malloc(file_count*sizeof(char*));
#pragma omp parallel for collapse(3)
for(uint64_t x = xStartChunk; x < xEndChunk; x++){
for(uint64_t y = yStartChunk; y < yEndChunk; y++){
for(uint64_t z = zStartChunk; z < zEndChunk; z++){
uint64_t currFile = (z-zStartChunk)+((y-yStartChunk)*zChunks)+((x-xStartChunk)*yChunks*zChunks);
asprintf(&chunkNames[currFile],"%llu.%llu.%llu",x,y,z);
}
}
}
cI.chunkNames = chunkNames;
cI.numChunks = file_count;
return cI;
}
/* Read the 3-element "chunks" array into the chunk shape.
 * Uses valuedouble instead of valueint: cJSON saturates valueint at
 * INT_MAX, which would corrupt any dimension larger than 2^31-1. */
void setChunkShapeFromJSON(cJSON *json, uint64_t *x, uint64_t *y, uint64_t *z){
    cJSON *dim = json->child;
    *x = (uint64_t)dim->valuedouble;
    dim = dim->next;
    *y = (uint64_t)dim->valuedouble;
    dim = dim->next;
    *z = (uint64_t)dim->valuedouble;
}
/* Copy the 4-byte zarr dtype string (3 characters such as "<u2" plus the
 * terminating NUL — assumes the JSON value is exactly 3 chars long; confirm
 * against the .zarray writer) into the caller's dtype buffer. */
void setDTypeFromJSON(cJSON *json, char* dtype){
    for(int k = 0; k < 4; k++){
        dtype[k] = json->valuestring[k];
    }
}
/* Extract the single-character storage order ('C' or 'F') from the JSON value. */
void setOrderFromJSON(cJSON *json, char* order){
    order[0] = json->valuestring[0];
}
/* Read the 3-element "shape" array into the full array shape.
 * Uses valuedouble instead of valueint: cJSON saturates valueint at
 * INT_MAX, which would corrupt any dimension larger than 2^31-1. */
void setShapeFromJSON(cJSON *json, uint64_t *x, uint64_t *y, uint64_t *z){
    cJSON *dim = json->child;
    *x = (uint64_t)dim->valuedouble;
    dim = dim->next;
    *y = (uint64_t)dim->valuedouble;
    dim = dim->next;
    *z = (uint64_t)dim->valuedouble;
}
/* Read <fileName>/.zarray and extract the chunk shape, dtype, storage order
 * and array shape.  Aborts via mexErrMsgIdAndTxt on I/O, allocation or
 * parse failure, or if one of the four required keys is missing.
 * Fixes vs. original: the file buffer is now freed (was leaked); the JSON
 * tree is deleted from its root (the original deleted from a walked cursor);
 * a malformed .zarray no longer loops forever / dereferences NULL; the
 * fread result is checked. */
void setValuesFromJSON(char* fileName,uint64_t *chunkXSize,uint64_t *chunkYSize,uint64_t *chunkZSize,char* dtype,char* order,uint64_t *shapeX,uint64_t *shapeY,uint64_t *shapeZ){
char* zArray = ".zarray";
/* +9: one separator, seven chars of ".zarray", trailing NUL */
char* fnFull = (char*)malloc(strlen(fileName)+9);
if(!fnFull) mexErrMsgIdAndTxt("zarr:inputError","Out of memory\n");
fnFull[0] = '\0';
char fileSepS[2];
fileSepS[0] = fileSep;
fileSepS[1] = '\0';
strcat(fnFull,fileName);
strcat(fnFull,fileSepS);
strcat(fnFull,zArray);
FILE *fileptr = fopen(fnFull, "rb");
if(!fileptr) mexErrMsgIdAndTxt("zarr:inputError","Failed to open JSON File: %s\n",fnFull);
free(fnFull);
fseek(fileptr, 0, SEEK_END);
long filelen = ftell(fileptr);
rewind(fileptr);
char* buffer = (char *)malloc((filelen));
if(!buffer){
fclose(fileptr);
mexErrMsgIdAndTxt("zarr:inputError","Out of memory\n");
}
if(fread(buffer, filelen, 1, fileptr) != 1){
fclose(fileptr);
mexErrMsgIdAndTxt("zarr:inputError","Failed to read JSON File\n");
}
fclose(fileptr);
cJSON *root = cJSON_ParseWithLength(buffer,filelen);
free(buffer); /* parsed copy no longer needed (was leaked before) */
if(!root) mexErrMsgIdAndTxt("zarr:inputError","Failed to parse JSON File\n");
uint8_t flags[4] = {0,0,0,0};
/* single pass over the top-level members; unknown keys are ignored */
for(cJSON *item = root->child; item; item = item->next){
if(!item->string) continue;
if(!strcmp(item->string,"chunks")){
setChunkShapeFromJSON(item, chunkXSize,chunkYSize,chunkZSize);
flags[0] = 1;
}
else if(!strcmp(item->string,"dtype")){
setDTypeFromJSON(item, dtype);
flags[1] = 1;
}
else if(!strcmp(item->string,"order")){
setOrderFromJSON(item, order);
flags[2] = 1;
}
else if(!strcmp(item->string,"shape")){
setShapeFromJSON(item, shapeX,shapeY,shapeZ);
flags[3] = 1;
}
}
cJSON_Delete(root); /* delete from the root, not a moved cursor */
if(!(flags[0] && flags[1] && flags[2] && flags[3]))
mexErrMsgIdAndTxt("zarr:inputError","Required key missing from .zarray\n");
}
/* Read the requested region of a zarr array into the pre-allocated output
 * buffer `zarr`.  Chunks are split into numWorkers batches, decompressed
 * with blosc2 in parallel, and copied into place according to the storage
 * order ('F' = x fastest via row memcpy, 'C' = z fastest via per-element
 * copy).  bits selects the element type (8/16 int, 32 float, 64 double).
 * Errors are recorded under the critical section and reported once at the
 * end via mexErrMsgIdAndTxt. */
void parallelReadZarrMex(void* zarr, char* folderName,uint64_t startX, uint64_t startY, uint64_t startZ, uint64_t endX, uint64_t endY,uint64_t endZ,uint64_t chunkXSize,uint64_t chunkYSize,uint64_t chunkZSize,uint64_t shapeX,uint64_t shapeY,uint64_t shapeZ, uint64_t bits, char order){
char fileSepS[2];
fileSepS[0] = fileSep;
fileSepS[1] = '\0';
/* Initialize the Blosc compressor */
int32_t numWorkers = omp_get_max_threads();
blosc_init();
blosc_set_nthreads(numWorkers);
struct chunkInfo cI = getChunkInfo(folderName, startX, startY, startZ, endX, endY, endZ,chunkXSize,chunkYSize,chunkZSize);
if(!cI.chunkNames) mexErrMsgIdAndTxt("zarr:inputError","File \"%s\" cannot be opened",folderName);
/* ceil(numChunks / numWorkers): chunks handled per worker */
int32_t batchSize = (cI.numChunks-1)/numWorkers+1;
/* elements per chunk */
uint64_t s = chunkXSize*chunkYSize*chunkZSize;
int32_t w;
int err = 0;
char errString[10000];
#pragma omp parallel for if(numWorkers<=cI.numChunks)
for(w = 0; w < numWorkers; w++){
/* per-worker decompression target, sized for one full chunk */
void* bufferDest = mallocDynamic(s,bits);
/* compressed-file buffer is reused and only regrown when needed */
uint64_t lastFileLen = 0;
char *buffer = NULL;
for(int64_t f = w*batchSize; f < (w+1)*batchSize; f++){
if(f>=cI.numChunks || err) break;
struct chunkAxisVals cAV = getChunkAxisVals(cI.chunkNames[f]);
//malloc +2 for null term and filesep
char *fileName = malloc(strlen(folderName)+strlen(cI.chunkNames[f])+2);
fileName[0] = '\0';
strcat(fileName,folderName);
strcat(fileName,fileSepS);
strcat(fileName,cI.chunkNames[f]);
FILE *fileptr = fopen(fileName, "rb");
if(!fileptr){
/* NOTE(review): fileName leaks on this error path (freed only below) */
#pragma omp critical
{
err = 1;
sprintf(errString,"Could not open file: %s\n",fileName);
}
break;
}
free(fileName);
fseek(fileptr, 0, SEEK_END);
long filelen = ftell(fileptr);
rewind(fileptr);
/* NOTE(review): signed long compared with uint64_t; a negative ftell
   result would wrap — assumes ftell succeeds */
if(lastFileLen < filelen){
//char *buffer = (char *)malloc((filelen));
free(buffer);
buffer = (char*) malloc(filelen);
lastFileLen = filelen;
}
fread(buffer, filelen, 1, fileptr);
fclose(fileptr);
// Decompress
int dsize = -1;
switch(bits){
case 8:
dsize = blosc2_decompress(buffer, filelen,bufferDest, s*sizeof(uint8_t));
break;
case 16:
dsize = blosc2_decompress(buffer, filelen,bufferDest, s*sizeof(uint16_t));
break;
case 32:
dsize = blosc2_decompress(buffer, filelen,bufferDest, s*sizeof(float));
break;
case 64:
dsize = blosc2_decompress(buffer, filelen,bufferDest, s*sizeof(double));
break;
}
if(dsize < 0){
#pragma omp critical
{
err = 1;
sprintf(errString,"Decompression error. Error code: %d ChunkName: %s/%s\n",dsize,folderName,cI.chunkNames[f]);
}
break;
}
//printf("ChunkName: %s\n",cI.chunkNames[f]);
//printf("w: %d b: %d\n",w,f);
if(order == 'F'){
/* Fortran order: x is contiguous, so each (y,z) row is one memcpy.
   The three special cases trim the row when the chunk straddles the
   region's start and/or end along x. */
for(int64_t z = cAV.z*chunkZSize; z < (cAV.z+1)*chunkZSize; z++){
if(z>=endZ) break;
else if(z<startZ) continue;
for(int64_t y = cAV.y*chunkYSize; y < (cAV.y+1)*chunkYSize; y++){
if(y>=endY) break;
else if(y<startY) continue;
if(((cAV.x*chunkXSize) < startX && ((cAV.x+1)*chunkXSize) > startX) || (cAV.x+1)*chunkXSize>endX){
/* chunk overlaps both region boundaries along x */
if(((cAV.x*chunkXSize) < startX && ((cAV.x+1)*chunkXSize) > startX) && (cAV.x+1)*chunkXSize>endX){
switch(bits){
case 8:
memcpy((uint8_t*)zarr+(((cAV.x*chunkXSize)-startX+(startX%chunkXSize))+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(uint8_t*)bufferDest+((startX%chunkXSize)+((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),((endX%chunkXSize)-(startX%chunkXSize))*(bits/8));
break;
case 16:
memcpy((uint16_t*)zarr+(((cAV.x*chunkXSize)-startX+(startX%chunkXSize))+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(uint16_t*)bufferDest+((startX%chunkXSize)+((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),((endX%chunkXSize)-(startX%chunkXSize))*(bits/8));
break;
case 32:
memcpy((float*)zarr+(((cAV.x*chunkXSize)-startX+(startX%chunkXSize))+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(float*)bufferDest+((startX%chunkXSize)+((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),((endX%chunkXSize)-(startX%chunkXSize))*(bits/8));
break;
case 64:
memcpy((double*)zarr+(((cAV.x*chunkXSize)-startX+(startX%chunkXSize))+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(double*)bufferDest+((startX%chunkXSize)+((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),((endX%chunkXSize)-(startX%chunkXSize))*(bits/8));
break;
}
}
/* chunk sticks out past the region's end along x */
else if((cAV.x+1)*chunkXSize>endX){
switch(bits){
case 8:
memcpy((uint8_t*)zarr+(((cAV.x*chunkXSize)-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(uint8_t*)bufferDest+(((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),(endX%chunkXSize)*(bits/8));
break;
case 16:
memcpy((uint16_t*)zarr+(((cAV.x*chunkXSize)-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(uint16_t*)bufferDest+(((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),(endX%chunkXSize)*(bits/8));
break;
case 32:
memcpy((float*)zarr+(((cAV.x*chunkXSize)-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(float*)bufferDest+(((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),(endX%chunkXSize)*(bits/8));
break;
case 64:
memcpy((double*)zarr+(((cAV.x*chunkXSize)-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(double*)bufferDest+(((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),(endX%chunkXSize)*(bits/8));
break;
}
}
/* chunk starts before the region along x */
else if((cAV.x*chunkXSize) < startX && ((cAV.x+1)*chunkXSize) > startX){
switch(bits){
case 8:
memcpy((uint8_t*)zarr+(((cAV.x*chunkXSize-startX+(startX%chunkXSize)))+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(uint8_t*)bufferDest+((startX%chunkXSize)+((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),(chunkXSize-(startX%chunkXSize))*(bits/8));
break;
case 16:
memcpy((uint16_t*)zarr+(((cAV.x*chunkXSize-startX+(startX%chunkXSize)))+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(uint16_t*)bufferDest+((startX%chunkXSize)+((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),(chunkXSize-(startX%chunkXSize))*(bits/8));
break;
case 32:
memcpy((float*)zarr+(((cAV.x*chunkXSize-startX+(startX%chunkXSize)))+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(float*)bufferDest+((startX%chunkXSize)+((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),(chunkXSize-(startX%chunkXSize))*(bits/8));
break;
case 64:
memcpy((double*)zarr+(((cAV.x*chunkXSize-startX+(startX%chunkXSize)))+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(double*)bufferDest+((startX%chunkXSize)+((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),(chunkXSize-(startX%chunkXSize))*(bits/8));
break;
}
}
}
else{
/* chunk lies fully inside the region along x: copy the whole row */
switch(bits){
case 8:
memcpy((uint8_t*)zarr+(((cAV.x*chunkXSize)-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(uint8_t*)bufferDest+(((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),chunkXSize*(bits/8));
break;
case 16:
memcpy((uint16_t*)zarr+(((cAV.x*chunkXSize)-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(uint16_t*)bufferDest+(((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),chunkXSize*(bits/8));
break;
case 32:
memcpy((float*)zarr+(((cAV.x*chunkXSize)-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(float*)bufferDest+(((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),chunkXSize*(bits/8));
break;
case 64:
memcpy((double*)zarr+(((cAV.x*chunkXSize)-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)),(double*)bufferDest+(((y%chunkYSize)*chunkXSize)+((z%chunkZSize)*chunkXSize*chunkYSize)),chunkXSize*(bits/8));
break;
}
}
}
}
}
else if (order == 'C'){
/* C order: chunk data has z fastest, output has x fastest, so copy
   element by element */
for(int64_t x = cAV.x*chunkXSize; x < (cAV.x+1)*chunkXSize; x++){
if(x>=endX) break;
else if(x<startX) continue;
for(int64_t y = cAV.y*chunkYSize; y < (cAV.y+1)*chunkYSize; y++){
if(y>=endY) break;
else if(y<startY) continue;
for(int64_t z = cAV.z*chunkZSize; z < (cAV.z+1)*chunkZSize; z++){
if(z>=endZ) break;
else if(z<startZ) continue;
switch(bits){
case 8:
((uint8_t*)zarr)[(x-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)] = ((uint8_t*)bufferDest)[(z%chunkZSize)+((y%chunkYSize)*chunkZSize)+((x%chunkXSize)*chunkZSize*chunkYSize)];
break;
case 16:
((uint16_t*)zarr)[(x-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)] = ((uint16_t*)bufferDest)[(z%chunkZSize)+((y%chunkYSize)*chunkZSize)+((x%chunkXSize)*chunkZSize*chunkYSize)];
break;
case 32:
((float*)zarr)[(x-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)] = ((float*)bufferDest)[(z%chunkZSize)+((y%chunkYSize)*chunkZSize)+((x%chunkXSize)*chunkZSize*chunkYSize)];
break;
case 64:
((double*)zarr)[(x-startX)+((y-startY)*shapeX)+((z-startZ)*shapeX*shapeY)] = ((double*)bufferDest)[(z%chunkZSize)+((y%chunkYSize)*chunkZSize)+((x%chunkXSize)*chunkZSize*chunkYSize)];
break;
}
}
}
}
}
}
free(bufferDest);
free(buffer);
}
/* release the chunk-name list */
#pragma omp parallel for
for(int i = 0; i < cI.numChunks; i++){
free(cI.chunkNames[i]);
}
free(cI.chunkNames);
/* After using it, destroy the Blosc environment */
blosc_destroy();
if(err) mexErrMsgIdAndTxt("zarr:threadError",errString);
}
// TODO: FIX MEMORY LEAKS
/* MEX entry point: out = parallelReadZarr(folderName [, bounds])
 * prhs[0]  path of the zarr folder (string, required)
 * prhs[1]  optional 1x6 double [startX startY startZ endX endY endZ],
 *          1-based bounds; omitted means the whole array is read.
 * plhs[0]  3-D array whose class follows the zarr dtype
 *          (u1 -> uint8, u2 -> uint16, f4 -> single, f8 -> double).
 * Fixes vs. original: the unsupported-dtype error used the identifier
 * "tiff:dataTypeError" (inconsistent with every other "zarr:*" id in this
 * file) and misspelled "supported". */
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[])
{
uint64_t startX = 0;
uint64_t startY = 0;
uint64_t startZ = 0;
uint64_t endX = 0;
uint64_t endY = 0;
uint64_t endZ = 0;
if(!nrhs) mexErrMsgIdAndTxt("zarr:inputError","This functions requires at least one argument");
else if(nrhs == 2){
if(mxGetN(prhs[1]) != 6) mexErrMsgIdAndTxt("zarr:inputError","Input range is not 6");
/* convert from MATLAB's 1-based inclusive to 0-based half-open bounds */
startX = (uint64_t)*(mxGetPr(prhs[1]))-1;
startY = (uint64_t)*((mxGetPr(prhs[1])+1))-1;
startZ = (uint64_t)*((mxGetPr(prhs[1])+2))-1;
endX = (uint64_t)*((mxGetPr(prhs[1])+3));
endY = (uint64_t)*((mxGetPr(prhs[1])+4));
endZ = (uint64_t)*((mxGetPr(prhs[1])+5));
/* a 0 (or negative) lower bound wraps to UINT64_MAX above, so start+1 < 1 catches it */
if(startX+1 < 1 || startY+1 < 1 || startZ+1 < 1) mexErrMsgIdAndTxt("zarr:inputError","Lower bounds must be at least 1");
}
else if (nrhs > 2) mexErrMsgIdAndTxt("zarr:inputError","Number of input arguments must be 1 or 2");
if(!mxIsChar(prhs[0])) mexErrMsgIdAndTxt("zarr:inputError","The first argument must be a string");
char* folderName = mxArrayToString(prhs[0]);
uint64_t shapeX = 0;
uint64_t shapeY = 0;
uint64_t shapeZ = 0;
uint64_t chunkXSize = 0;
uint64_t chunkYSize = 0;
uint64_t chunkZSize = 0;
char dtype[4];
char order;
/* read chunk shape, dtype, order and array shape from <folder>/.zarray */
setValuesFromJSON(folderName,&chunkXSize,&chunkYSize,&chunkZSize,dtype,&order,&shapeX,&shapeY,&shapeZ);
if(endX > shapeX || endY > shapeY || endZ > shapeZ) mexErrMsgIdAndTxt("zarr:inputError","Upper bound is invalid");
if(nrhs == 1){
endX = shapeX;
endY = shapeY;
endZ = shapeZ;
}
/* NOTE(review): mxCreateNumericArray takes mwSize dims; uint64_t matches on
   64-bit MATLAB builds — confirm if 32-bit builds must be supported */
uint64_t dim[3];
/* from here on shapeX/Y/Z hold the size of the requested region */
shapeX = endX-startX;
shapeY = endY-startY;
shapeZ = endZ-startZ;
dim[0] = shapeX;
dim[1] = shapeY;
dim[2] = shapeZ;
/* dtype[0] is the endianness character and is ignored here */
if(dtype[1] == 'u' && dtype[2] == '1'){
uint64_t bits = 8;
plhs[0] = mxCreateNumericArray(3,dim,mxUINT8_CLASS, mxREAL);
uint8_t* zarr = (uint8_t*)mxGetPr(plhs[0]);
parallelReadZarrMex((void*)zarr,folderName,startX,startY,startZ,endX,endY,endZ,chunkXSize,chunkYSize,chunkZSize,shapeX,shapeY,shapeZ,bits,order);
}
else if(dtype[1] == 'u' && dtype[2] == '2'){
uint64_t bits = 16;
plhs[0] = mxCreateNumericArray(3,dim,mxUINT16_CLASS, mxREAL);
uint16_t* zarr = (uint16_t*)mxGetPr(plhs[0]);
parallelReadZarrMex((void*)zarr,folderName,startX,startY,startZ,endX,endY,endZ,chunkXSize,chunkYSize,chunkZSize,shapeX,shapeY,shapeZ,bits,order);
}
else if(dtype[1] == 'f' && dtype[2] == '4'){
uint64_t bits = 32;
plhs[0] = mxCreateNumericArray(3,dim,mxSINGLE_CLASS, mxREAL);
float* zarr = (float*)mxGetPr(plhs[0]);
parallelReadZarrMex((void*)zarr,folderName,startX,startY,startZ,endX,endY,endZ,chunkXSize,chunkYSize,chunkZSize,shapeX,shapeY,shapeZ,bits,order);
}
else if(dtype[1] == 'f' && dtype[2] == '8'){
uint64_t bits = 64;
plhs[0] = mxCreateNumericArray(3,dim,mxDOUBLE_CLASS, mxREAL);
double* zarr = (double*)mxGetPr(plhs[0]);
parallelReadZarrMex((void*)zarr,folderName,startX,startY,startZ,endX,endY,endZ,chunkXSize,chunkYSize,chunkZSize,shapeX,shapeY,shapeZ,bits,order);
}
else{
mexErrMsgIdAndTxt("zarr:dataTypeError","Data type not supported");
}
}
|
progress.h | #pragma once
#include <iostream>
#include <sstream>
#include <string>
// Center s in a field of n characters, padding with spaces on both sides
// (the extra space, if any, goes to the right).
//
// Fix: the old version computed std::string((n - l) / 2, ' ') with a
// negative int when s was longer than the field, which converts to a huge
// size_t and throws std::length_error (or worse). Strings that do not fit
// are now returned unpadded.
inline std::string center(std::string const &s, int n)
{
    int l = static_cast<int>(s.length());
    if (l >= n)
        return s;                       // nothing to pad; avoids size_t underflow
    int left = (n - l) / 2;
    int right = n - left - l;           // remainder of the padding
    std::ostringstream o;
    o << std::string(left, ' ') << s << std::string(right, ' ');
    return o.str();
}
// Text progress bar drawn on stderr in a fixed 50-column field; the filled
// portion is rendered with an ANSI colored background, the rest plain.
class ProgressBar
{
size_t N, i, j; // N: expected total ticks; i: ticks seen; j: filled columns (0..50)
std::string txt; // the label, centered in the 50-column field
public:
// N_ is the number of tic() calls expected before the bar is full.
// NOTE(review): tic() divides by N, so N_ == 0 would divide by zero --
// confirm callers always pass a positive count.
ProgressBar(int N_, std::string const &txt_ = "crunching ..."):
N(N_), i(0), j(0), txt(center(txt_, 50))
{
draw();
}
// Register one unit of work. Safe to call from OpenMP worker threads
// (the update is in a critical section). Redraws only when the filled
// width actually grows, so output volume stays O(50) not O(N).
void tic()
{
#pragma omp critical
{
++i;
if (i > N) i = N;
else if (i * 50 / N > j)
{
++j;
draw();
}
}
}
// Snap the bar to 100% and terminate the line.
void finish()
{
j = 50;
draw();
std::cerr << std::endl;
}
// Redraw in place: "\r" returns to column 0; "\033[44;33;1m" colors the
// first j characters, "\033[m" resets attributes for the remainder.
void draw() const
{
std::cerr << "\r\033[m(\033[44;33;1m"
<< txt.substr(0, j) << "\033[m"
<< txt.substr(j, 50) << ")";
}
};
// Iterator adapter that advances a ProgressBar on every pre-increment.
// not completely right ... (post-increment, copies, and early loop exit are
// not handled; the bar is only finished when the iterator is destroyed)
template <typename I>
class ProgressIterator: public I
{
ProgressBar B;
public:
// b: the wrapped iterator's starting position; N: expected increment count.
ProgressIterator(I b, int N, std::string const &txt = "iterating ..."):
I(b), B(N, txt)
{}
~ProgressIterator()
{
B.finish();
}
// Advance the underlying iterator, then count one tick of progress.
ProgressIterator &operator++()
{
I::operator++();
B.tic();
return *this;
}
};
|
GB_unaryop__lnot_uint16_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_int64
// op(A') function: GB_tran__lnot_uint16_int64
// C type: uint16_t
// A type: int64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !(Ax [p] != 0) for all anz entries, casting int64_t to uint16_t
// (via the GB_CAST_OP macro defined above). Auto-generated; do not edit.
GrB_Info GB_unop__lnot_uint16_int64
(
uint16_t *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator was compiled out via GB_control.h: caller uses the generic path
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared transpose template does the work; the
// macros defined above specialize it for this operator/type pair.
GrB_Info GB_tran__lnot_uint16_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_int16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int16_int16
// op(A') function: GB_tran__minv_int16_int16
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED (Ax [p], 16) for all anz entries: int16 integer
// multiplicative inverse, applied in parallel. Auto-generated; do not edit.
GrB_Info GB_unop__minv_int16_int16
(
int16_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator disabled via GB_control.h: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose + typecast + apply, via the shared template
// specialized by the macros above.
GrB_Info GB_tran__minv_int16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hsaxpy.c | /**
* @file hsaxpy.c
* @brief Function definition for performing the \c saxpy operation on host.
*
* This source file contains function definition for the \c saxpy operation,
* which is defined as:
*
* y := a * x + y
*
* where:
*
* - a is a scalar.
* - x and y are single-precision vectors each with n elements.
*
* @author Xin Wu (PC²)
* @date 05.04.2020
* @copyright CC BY-SA 2.0
*/
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "wtcalc.h"
#include "hsaxpy.h"
/*
 * hsaxpy: host-side saxpy, y := a * x + y over n single-precision elements.
 *
 * The loop is parallelized and vectorized with OpenMP. Wall-clock time of
 * the computation is accumulated into the global wtcalc whenever wtcalc is
 * non-negative (a negative wtcalc disables timing).
 */
void hsaxpy(const int n,
            const float a,
            const float *x,
            float *y)
{
  struct timespec t_begin, t_end;

  clock_gettime(CLOCK_REALTIME, &t_begin);
#pragma omp parallel for simd schedule(simd:static) \
    default(none) shared(a, n, x, y)
  for (int idx = 0; idx < n; ++idx) {
    y[idx] += a * x[idx];
  }
  clock_gettime(CLOCK_REALTIME, &t_end);

  if (wtcalc >= 0.0) {
    wtcalc += (t_end.tv_sec  - t_begin.tv_sec)
            + 1.0e-9 * (t_end.tv_nsec - t_begin.tv_nsec);
  }
}
|
bm2d_mdev_iteration.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "homp.h"
#include "bm2d.h"
/*
 * Launcher run by the offloading runtime on each participating device for the
 * bm2d stencil kernel. Unpacks the bm2d_off_args, resolves the device-side
 * addresses of u/uold/coeff from their data maps, picks the device-specific
 * kernel wrapper, and runs num_its iterations, ping-ponging u and uold.
 *
 * Bug fix: bm2d_kernel_wrapper was previously left uninitialized when the
 * device type matched NVGPU/ITLMIC but the corresponding support macro was
 * not compiled in, and the loop then called a garbage function pointer.
 * It is now initialized to NULL and checked before use. Unused locals
 * (stream, off_info) were removed.
 */
void bm2d_omp_mdev_iteration_launcher(omp_offloading_t *off, void *args) {
    struct bm2d_off_args * iargs = (struct bm2d_off_args*) args;
    long n = iargs->n;
    long m = iargs->m;
    int maxwin = iargs->maxwin;
    int num_its = iargs->num_its;
    long u_dimX = iargs->u_dimX;
    long u_dimY = iargs->u_dimY;
    int coeff_dimX = iargs->coeff_dimX;
    /* -1: look each map up by its host-side base address */
    omp_data_map_t * map_u = omp_map_get_map(off, iargs->u, -1);
    omp_data_map_t * map_uold = omp_map_get_map(off, iargs->uold, -1);
    omp_data_map_t * map_coeff = omp_map_get_map(off, iargs->coeff, -1);
    REAL * u = (REAL*) map_u->map_dev_wextra_ptr;
    REAL * uold = (REAL*) map_uold->map_dev_wextra_ptr;
    REAL * coeff = (REAL*) map_coeff->map_dev_wextra_ptr;
    /* point coeff at the center element of the (2*maxwin+1)^2 window */
    coeff = coeff + (2*maxwin+1) * maxwin + maxwin; /* TODO this should be a call to map a host-side address to dev-side address*/
    long it; /* iteration */
#if CORRECTNESS_CHECK
    printf("kernel launcher: u: %X, uold: %X\n", u, uold);
    print_array("u in device: ", "udev", u, n, m);
    print_array("uold in device: ", "uolddev", uold, n, m);
#endif
    long offset;
    long start;
    long len;
    /* row-wise dist: get this device's slice [start, start+len) of the loop */
    offset = omp_loop_get_range(off, 0, &start, &len);
#if 0
    /* col-wise dist */
    omp_loop_get_range(off, 0, &start, &len);
    /* row/col-wise dist */
    omp_loop_get_range(off, 0, &start, &len); /* todo */
    omp_loop_get_range(off, 0, &start, &len); /* todo */
#endif
    omp_device_type_t devtype = off->dev->type;
    /* NULL until a usable wrapper is selected; checked below (bug fix) */
    void (*bm2d_kernel_wrapper)(omp_offloading_t *off, long start_n, long len_n, long n, long m, long u_dimX,
                                long u_dimY, REAL *u, REAL *uold, int maxwin, int coeff_dimX, REAL *coeff) = NULL;
    if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
        bm2d_kernel_wrapper = bm2d_nvgpu_cuda_wrapper;
#endif
    } else if (devtype == OMP_DEVICE_ITLMIC) {
#if defined(DEVICE_ITLMIC_SUPPORT)
        bm2d_kernel_wrapper = bm2d_itlmic_wrapper;
#endif
    } else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
        bm2d_kernel_wrapper = bm2d_cpu_omp_wrapper;
    }
    if (bm2d_kernel_wrapper == NULL) {
        /* unknown device type, or its wrapper was compiled out of this build */
        fprintf(stderr, "device type is not supported for this call\n");
        abort();
    }
    omp_event_t *events = off->events;
    /* close the event the runtime opened so per-iteration timing starts clean */
    omp_event_record_stop(&events[acc_kernel_exe_event_index]); /* match the runtime start */
    omp_event_accumulate_elapsed_ms(&events[acc_kernel_exe_event_index], 0);
    for (it = 0; it < num_its; it++) {
        omp_event_record_start(&events[acc_kernel_exe_event_index]);
        /* ping-pong source/destination buffers between iterations */
        REAL * uu;
        REAL * uuold;
        if (it % 2 == 0) {
            uu = u;
            uuold = uold;
        } else {
            uu = uold;
            uuold = u;
        }
        bm2d_kernel_wrapper(off, start, len, n, m, u_dimX, u_dimY, uu, uuold, maxwin, coeff_dimX, coeff);
        omp_event_record_stop(&events[acc_kernel_exe_event_index]);
        omp_event_accumulate_elapsed_ms(&events[acc_kernel_exe_event_index], 0);
    }
    /* match the runtime stop */
    omp_event_record_start(&events[acc_kernel_exe_event_index]);
}
/*
 * bm2d_omp_mdev_iterate: set up and run the bm2d stencil offload across
 * ndevs devices (row-wise 1-D distribution of the n x m interior, halo of
 * width maxwin). Copies data to the devices, runs the kernel offload
 * num_runs times (averaging the kernel time), copies results back, and
 * returns the total elapsed time (init + copy-to + kernel + copy-from) in ms.
 *
 * u points to the full (n+2*maxwin) x (m+2*maxwin) host array including the
 * halo border; coeff is the (2*maxwin+1)^2 stencil coefficient window.
 */
double bm2d_omp_mdev_iterate(int ndevs, int *targets, long n, long m, REAL *u, int maxwin, REAL *coeff,
int num_its) {
long u_dimX = n + 2 * maxwin;
long u_dimY = m + 2 * maxwin;
int coeff_dimX = 2*maxwin+1;
REAL * coeff_center = coeff + (2*maxwin+1) * maxwin + maxwin; /* let coeff point to the center element */
/* uold is the second ping-pong buffer, seeded with a copy of u */
REAL *uold = (REAL *) omp_unified_malloc(sizeof(REAL) * u_dimX * u_dimY);
memcpy(uold, u, sizeof(REAL)*u_dimX * u_dimY);
//print_array("Before offloading", "u", u, u_dimX, u_dimY);
double off_init_time = read_timer_ms();
/**************************************** dist-specific *****************************************/
int __top_ndims__ = 1;
/* TODO: to use row/col-wise dist, __top_ndims__ should be set to 2 */
omp_grid_topology_t * __top__ = omp_grid_topology_init(ndevs, targets, __top_ndims__);
/* init other infos (dims, periodic, idmaps) of top if needed */
int __num_maps__ = 3; /* u, uold and the coeff */ /* XXX: need compiler output */
/* data copy offloading */
omp_offloading_info_t *__copy_data_off_info__ =
omp_offloading_init_info("data_copy", __top__, 1, OMP_OFFLOADING_DATA, __num_maps__, NULL, NULL, 0);
/* stencil kernel offloading: args struct is consumed by the launcher above */
struct bm2d_off_args off_args;
off_args.n = n; off_args.m = m; off_args.u = u; off_args.maxwin = maxwin; off_args.coeff = coeff; off_args.num_its = num_its;
off_args.uold = uold; off_args.coeff_center = coeff_center; off_args.coeff_dimX = coeff_dimX; off_args.u_dimX = u_dimX; off_args.u_dimY = u_dimY;
omp_offloading_info_t * __off_info__ =
omp_offloading_init_info("bm2d_kernel", __top__, 0, OMP_OFFLOADING_CODE, 0,
bm2d_omp_mdev_iteration_launcher, &off_args, 1);
omp_offloading_append_profile_per_iteration(__off_info__, 13*u_dimY, 7, 1);
//printf("data copy off: %X, bm2d off: %X\n", __copy_data_off_info__, __off_info__);
/* u map info: copied to devices and back (TOFROM) */
omp_data_map_info_t *__u_map_info__ = &__copy_data_off_info__->data_map_info[0];
omp_data_map_init_info("u", __u_map_info__, __copy_data_off_info__, u, 2, sizeof(REAL), OMP_DATA_MAP_TOFROM, OMP_DATA_MAP_AUTO);
omp_data_map_info_set_dims_2d(__u_map_info__, u_dimX, u_dimY);
/* uold map info: only copied to the devices (TO) */
omp_data_map_info_t *__uold_map_info__ = &__copy_data_off_info__->data_map_info[1];
omp_data_map_init_info("uold", __uold_map_info__, __copy_data_off_info__, uold, 2, sizeof(REAL), OMP_DATA_MAP_TO, OMP_DATA_MAP_AUTO);
omp_data_map_info_set_dims_2d(__uold_map_info__, u_dimX, u_dimY);
/* coeff map info: small, fully replicated on every device (FULL in both dims) */
omp_data_map_info_t *__coeff_map_info__ = &__copy_data_off_info__->data_map_info[2];
omp_data_map_init_info("coeff", __coeff_map_info__, __copy_data_off_info__, coeff, 2, sizeof(REAL), OMP_DATA_MAP_TO, OMP_DATA_MAP_AUTO);
omp_data_map_info_set_dims_2d(__coeff_map_info__, coeff_dimX, coeff_dimX);
omp_data_map_dist_init_info(__coeff_map_info__, 0, OMP_DIST_POLICY_FULL, 0, coeff_dimX, 0, 0);
omp_data_map_dist_init_info(__coeff_map_info__, 1, OMP_DIST_POLICY_FULL, 0, coeff_dimX, 0, 0);
/**************************************** dist-specific *****************************************/
/* row-wise distribution */
#if 0
/* BLOCK_BLOCK */
omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 0);
omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0, 0);
//printf("BLOCK dist policy for arrays and loop dist\n");
/* BLOCK_ALIGN */
omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 0);
omp_loop_dist_align_with_data_map(__off_info__, 0, 0, __u_map_info__, 0);
//printf("BLOCK dist policy for arrays, and loop dist align with array A row dist\n");
#endif
/* AUTO_ALIGN: distribute the loop, then align u's row dist to the loop dist */
omp_loop_dist_init_info(__off_info__, 0, LOOP_DIST_POLICY, 0, n, LOOP_DIST_CHUNK_SIZE, 0);
omp_data_map_dist_align_with_loop(__u_map_info__, 0, maxwin, __off_info__, 0);
//printf("AUTO dist policy for loop dist and array align with loops\n");
/* used by all row-wise dist */
omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_FULL, 0, u_dimY, 0, 0);
omp_map_add_halo_region(__u_map_info__, 0, maxwin, maxwin, OMP_DIST_HALO_EDGING_REFLECTING);
omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, OMP_ALIGNEE_OFFSET, __u_map_info__, OMP_ALL_DIMENSIONS);
#if 0
/* col-wise distribution */
omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_FULL, maxwin, n, 0, 0);
omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 0);
omp_map_add_halo_region(__u_map_info__, 0, maxwin, maxwin, OMP_DIST_HALO_EDGING_REFLECTING);
omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, 0, __u_map_info__, OMP_ALL_DIMENSIONS);
omp_loop_dist_init_info(__off_info__, 1, OMP_DIST_POLICY_BLOCK, 0, m, 0, 0);
/* row/col-wise distribution */
omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 0);
omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 1);
omp_map_add_halo_region(__u_map_info__, 0, maxwin, maxwin, OMP_DIST_HALO_EDGING_REFLECTING);
omp_map_add_halo_region(__u_map_info__, 1, maxwin, maxwin, OMP_DIST_HALO_EDGING_REFLECTING);
omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, 0, __u_map_info__, OMP_ALL_DIMENSIONS);
omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0, 0);
omp_loop_dist_init_info(__off_info__, 1, OMP_DIST_POLICY_BLOCK, 0, m, 0, 1);
#endif
#if 0
/* halo exchange offloading */
omp_data_map_halo_exchange_info_t x_halos[1];
x_halos[0].map_info = __u_map_info__; x_halos[0].x_direction = OMP_DATA_MAP_EXCHANGE_FROM_LEFT_RIGHT; /* u and uold*/
/* row-wise dist */
x_halos[0].x_dim = 0;
#if 0
/* col-wise dist */
x_halos[0].x_dim = 1;
/* row/col-wise dist */
x_halos[0].x_dim = -1; /* means all the dimension */
#endif
omp_offloading_append_data_exchange_info(__off_info__, x_halos, 1);
#endif
/************************************************************************************************/
off_init_time = read_timer_ms() - off_init_time;
/*********** NOW notifying helper thread to work on this offload ******************/
#if DEBUG_MSG
printf("=========================================== offloading to %d targets ==========================================\n", __num_target_devices__);
#endif
double off_copyto_time = read_timer_ms();
double start_time = off_copyto_time;
omp_offloading_start(__copy_data_off_info__);
off_copyto_time = read_timer_ms() - off_copyto_time;
omp_print_map_info(__u_map_info__);
omp_print_map_info(__uold_map_info__);
omp_print_map_info(__coeff_map_info__);
// printf("offloading from stencil now\n");
/* run the kernel offload num_runs times and average the elapsed time */
double off_kernel_time = read_timer_ms();
int it;
int num_runs = 10;
for (it=0; it< num_runs; it++) omp_offloading_start(__off_info__);
off_kernel_time = (read_timer_ms() - off_kernel_time)/ num_runs;
/* copy back u from each device and free others */
double off_copyfrom_time = read_timer_ms();
omp_offloading_start(__copy_data_off_info__);
off_copyfrom_time = read_timer_ms() - off_copyfrom_time;
double off_total = off_init_time + off_copyto_time + off_copyfrom_time + off_kernel_time;
#if defined (OMP_BREAKDOWN_TIMING)
/* not reporting status for data copy */
//omp_offloading_info_report_profile(__copy_data_off_info__);
omp_offloading_info_report_profile(__off_info__, num_runs);
//omp_offloading_info_t *infos[2];
//infos[0] = __copy_data_off_info__;
//infos[1] = __off_info__;
//omp_offloading_info_sum_profile(infos, 2, start_time, start_time+off_total);
//omp_offloading_info_report_profile(__copy_data_off_info__);
#endif
omp_offloading_fini_info(__copy_data_off_info__);
omp_offloading_fini_info(__off_info__);
omp_grid_topology_fini(__top__);
omp_unified_free(uold);
return off_total;
}
|
GB_unaryop__minv_bool_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_int8
// op(A') function: GB_tran__minv_bool_int8
// C type: bool
// A type: int8_t
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv (Ax [p]) for all anz entries; for bool output the operator
// is the constant true (see GB_OP above), so Ax is never read.
GrB_Info GB_unop__minv_bool_int8
(
bool *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator disabled via GB_control.h: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose + apply, via the shared template specialized
// by the macros above.
GrB_Info GB_tran__minv_bool_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hopscotch_hash.h | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/**
* Hopscotch hash is modified from the code downloaded from
* https://sites.google.com/site/cconcurrencypackage/hopscotch-hashing
* with the following terms of usage
*/
////////////////////////////////////////////////////////////////////////////////
//TERMS OF USAGE
//------------------------------------------------------------------------------
//
// Permission to use, copy, modify and distribute this software and
// its documentation for any purpose is hereby granted without fee,
// provided that due acknowledgments to the authors are provided and
// this permission notice appears in all copies of the software.
// The software is provided "as is". There is no warranty of any kind.
//
//Authors:
// Maurice Herlihy
// Brown University
// and
// Nir Shavit
// Tel-Aviv University
// and
// Moran Tzafrir
// Tel-Aviv University
//
// Date: July 15, 2008.
//
////////////////////////////////////////////////////////////////////////////////
// Programmer : Moran Tzafrir (MoranTza@gmail.com)
// Modified : Jongsoo Park (jongsoo.park@intel.com)
// Oct 1, 2015.
//
////////////////////////////////////////////////////////////////////////////////
#ifndef hypre_HOPSCOTCH_HASH_HEADER
#define hypre_HOPSCOTCH_HASH_HEADER
//#include <strings.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <math.h>
#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif
#include "_hypre_utilities.h"
// Potentially architecture specific features used here:
// __sync_val_compare_and_swap
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* This next section of code is here instead of in _hypre_utilities.h to get
* around some portability issues with Visual Studio. By putting it here, we
* can explicitly include this '.h' file in a few files in hypre and compile
* them with C++ instead of C (VS does not support C99 'inline').
******************************************************************************/
#ifdef HYPRE_USING_ATOMIC
/* Atomic compare-and-swap: if *ptr == oldval, store newval; in all cases
 * return the value *ptr held before the call (GCC __sync builtin semantics). */
static inline HYPRE_Int
hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
return __sync_val_compare_and_swap(ptr, oldval, newval);
//#elif defind _MSC_VER
//return _InterlockedCompareExchange((long *)ptr, newval, oldval);
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//_Atomic HYPRE_Int *atomic_ptr = ptr;
//atomic_compare_exchange_strong(atomic_ptr, &oldval, newval);
//return oldval;
/* NOTE(review): with HYPRE_USING_ATOMIC set but a compiler matching none of
 * the branches above, control falls off the end without a return (UB) --
 * confirm the supported-compiler set. */
#endif
}
/* Atomic fetch-and-add: *ptr += value; return the pre-increment value
 * (GCC __sync builtin semantics). Same compiler caveat as the CAS above. */
static inline HYPRE_Int
hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
return __sync_fetch_and_add(ptr, value);
//#elif defined _MSC_VER
//return _InterlockedExchangeAdd((long *)ptr, value);
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//_Atomic HYPRE_Int *atomic_ptr = ptr;
//return atomic_fetch_add(atomic_ptr, value);
#endif
}
#else // !HYPRE_USING_ATOMIC
/* Sequential (non-atomic) compare-and-swap fallback used when the library is
 * built without HYPRE_USING_ATOMIC: if *ptr equals oldval, install newval.
 * Always returns the value observed in *ptr before any store. */
static inline HYPRE_Int
hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
   HYPRE_Int observed = *ptr;
   if (observed == oldval)
   {
      *ptr = newval;
   }
   return observed;
}
/* Sequential (non-atomic) fetch-and-add fallback: add value to *ptr and
 * return the value *ptr held before the addition. */
static inline HYPRE_Int
hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
   HYPRE_Int previous = *ptr;
   *ptr = previous + value;
   return previous;
}
#endif // !HYPRE_USING_ATOMIC
/******************************************************************************/
// Constants ................................................................
#define HYPRE_HOPSCOTCH_HASH_HOP_RANGE (32)
#define HYPRE_HOPSCOTCH_HASH_INSERT_RANGE (4*1024)
#define HYPRE_HOPSCOTCH_HASH_EMPTY (0)
#define HYPRE_HOPSCOTCH_HASH_BUSY (1)
// Small Utilities ..........................................................
/* Return the 0-based index of the least-significant set bit of x, or -1 when
 * x == 0 (i.e. ffs() semantics shifted down by one). */
static inline HYPRE_Int
first_lsb_bit_indx( hypre_uint x )
{
HYPRE_Int pos;
#if defined(_MSC_VER) || defined(__MINGW64__)
if (x == 0)
{
pos = 0;
}
else
{
/* scan upward from bit 0 until the low bit is set */
for (pos = 1; !(x & 1); ++pos)
{
x >>= 1;
}
}
#else
/* NOTE(review): ffs() is declared in <strings.h>, whose include is commented
 * out at the top of this header -- presumably it arrives transitively; verify. */
pos = ffs(x);
#endif
return (pos - 1);
}
/**
* hypre_Hash is adapted from xxHash with the following license.
*/
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/***************************************
* Constants
***************************************/
#define HYPRE_XXH_PRIME32_1 2654435761U
#define HYPRE_XXH_PRIME32_2 2246822519U
#define HYPRE_XXH_PRIME32_3 3266489917U
#define HYPRE_XXH_PRIME32_4 668265263U
#define HYPRE_XXH_PRIME32_5 374761393U
#define HYPRE_XXH_PRIME64_1 11400714785074694791ULL
#define HYPRE_XXH_PRIME64_2 14029467366897019727ULL
#define HYPRE_XXH_PRIME64_3 1609587929392839161ULL
#define HYPRE_XXH_PRIME64_4 9650029242287828579ULL
#define HYPRE_XXH_PRIME64_5 2870177450012600261ULL
#define HYPRE_XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#define HYPRE_XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#if defined(HYPRE_MIXEDINT) || defined(HYPRE_BIGINT)
/* 64-bit integer hash adapted from the xxHash (XXH64) finalization for a
 * single 8-byte lane. Must never produce HYPRE_HOPSCOTCH_HASH_EMPTY (0);
 * the debug block below checks that invariant. */
static inline HYPRE_BigInt
hypre_BigHash( HYPRE_BigInt input )
{
hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input);
hypre_ulongint k1 = input;
k1 *= HYPRE_XXH_PRIME64_2;
k1 = HYPRE_XXH_rotl64(k1, 31);
k1 *= HYPRE_XXH_PRIME64_1;
h64 ^= k1;
h64 = HYPRE_XXH_rotl64(h64, 27)*HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;
/* avalanche: spread entropy across all bits */
h64 ^= h64 >> 33;
h64 *= HYPRE_XXH_PRIME64_2;
h64 ^= h64 >> 29;
h64 *= HYPRE_XXH_PRIME64_3;
h64 ^= h64 >> 32;
#ifndef NDEBUG
if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64) {
hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY);
hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64);
}
#endif
return h64;
}
#else
/* 32-bit integer hash adapted from the xxHash (XXH32) finalization for a
 * single lane; used when HYPRE_BigInt is a plain 32-bit int. */
static inline HYPRE_Int
hypre_BigHash(HYPRE_Int input)
{
hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input);
// 1665863975 is added to input so that
// only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY.
// Hence, we're fine as long as key is non-negative.
h32 += (input + 1665863975)*HYPRE_XXH_PRIME32_3;
h32 = HYPRE_XXH_rotl32(h32, 17)*HYPRE_XXH_PRIME32_4;
/* avalanche: spread entropy across all bits */
h32 ^= h32 >> 15;
h32 *= HYPRE_XXH_PRIME32_2;
h32 ^= h32 >> 13;
h32 *= HYPRE_XXH_PRIME32_3;
h32 ^= h32 >> 16;
//hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32);
return h32;
}
#endif
#ifdef HYPRE_BIGINT
/* 64-bit variant of hypre_Hash (HYPRE_BIGINT builds): same XXH64-style
 * single-lane finalization as hypre_BigHash above. Must never return
 * HYPRE_HOPSCOTCH_HASH_EMPTY (0); the debug block checks that. */
static inline HYPRE_Int
hypre_Hash(HYPRE_Int input)
{
hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input);
hypre_ulongint k1 = input;
k1 *= HYPRE_XXH_PRIME64_2;
k1 = HYPRE_XXH_rotl64(k1, 31);
k1 *= HYPRE_XXH_PRIME64_1;
h64 ^= k1;
h64 = HYPRE_XXH_rotl64(h64, 27)*HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;
/* avalanche */
h64 ^= h64 >> 33;
h64 *= HYPRE_XXH_PRIME64_2;
h64 ^= h64 >> 29;
h64 *= HYPRE_XXH_PRIME64_3;
h64 ^= h64 >> 32;
#ifndef NDEBUG
if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64) {
hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY);
hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64);
}
#endif
return h64;
}
#else
/* 32-bit variant of hypre_Hash: XXH32-style single-lane finalization. */
static inline HYPRE_Int
hypre_Hash(HYPRE_Int input)
{
hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input);
// 1665863975 is added to input so that
// only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY.
// Hence, we're fine as long as key is non-negative.
h32 += (input + 1665863975)*HYPRE_XXH_PRIME32_3;
h32 = HYPRE_XXH_rotl32(h32, 17)*HYPRE_XXH_PRIME32_4;
/* avalanche */
h32 ^= h32 >> 15;
h32 *= HYPRE_XXH_PRIME32_2;
h32 ^= h32 >> 13;
h32 *= HYPRE_XXH_PRIME32_3;
h32 ^= h32 >> 16;
//hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32);
return h32;
}
#endif
/* Hopscotch displacement: try to move the free bucket at *free_bucket closer
 * to its home bucket. Scans candidate buckets within HOP_RANGE-1 of the free
 * slot for an entry that may legally move into it; on success relocates that
 * entry, updates the hop bitmaps, and rewrites *free_bucket / *free_dist to
 * the newly-freed, closer slot. On failure sets *free_bucket = -1 and
 * *free_dist = 0 (caller must then resize). In concurrent builds each moved
 * bucket's segment is locked unless it is the caller's own segment. */
static inline void
hypre_UnorderedIntSetFindCloserFreeBucket( hypre_UnorderedIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *start_seg,
#endif
HYPRE_Int *free_bucket,
HYPRE_Int *free_dist )
{
HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
HYPRE_Int move_free_dist;
/* try the farthest candidate home bucket first, moving closer each pass */
for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
{
hypre_uint start_hop_info = s->hopInfo[move_bucket];
HYPRE_Int move_new_free_dist = -1;
hypre_uint mask = 1;
HYPRE_Int i;
/* find the first occupied slot of this home bucket that is closer to it
 * than the free slot is (only such entries may be displaced) */
for (i = 0; i < move_free_dist; ++i, mask <<= 1)
{
if (mask & start_hop_info)
{
move_new_free_dist = i;
break;
}
}
if (-1 != move_new_free_dist)
{
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
if(start_seg != move_segment)
omp_set_lock(&move_segment->lock);
#endif
/* re-check hopInfo under the lock: another thread may have raced us */
if (start_hop_info == s->hopInfo[move_bucket])
{
// new_free_bucket -> free_bucket and empty new_free_bucket
HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
s->key[*free_bucket] = s->key[new_free_bucket];
s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* bump the timestamp so concurrent readers can detect the move */
++move_segment->timestamp;
#pragma omp flush
#endif
s->hopInfo[move_bucket] |= (1U << move_free_dist);
s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);
*free_bucket = new_free_bucket;
*free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
return;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
}
++move_bucket;
}
/* no entry could be displaced: signal failure to the caller */
*free_bucket = -1;
*free_dist = 0;
}
/* Big-int-keyed variant of hypre_UnorderedIntSetFindCloserFreeBucket; see
 * that function for the displacement algorithm.  Sets *free_bucket to -1
 * when nothing within HOP_RANGE-1 buckets can be relocated. */
static inline void
hypre_UnorderedBigIntSetFindCloserFreeBucket( hypre_UnorderedBigIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *start_seg,
#endif
HYPRE_Int *free_bucket,
HYPRE_Int *free_dist )
{
// Earliest bucket whose hop neighborhood still covers *free_bucket.
HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
HYPRE_Int move_free_dist;
for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
{
// Snapshot of the hop bitmap; re-validated under the segment lock below.
hypre_uint start_hop_info = s->hopInfo[move_bucket];
HYPRE_Int move_new_free_dist = -1;
hypre_uint mask = 1;
HYPRE_Int i;
for (i = 0; i < move_free_dist; ++i, mask <<= 1)
{
if (mask & start_hop_info)
{
move_new_free_dist = i;
break;
}
}
if (-1 != move_new_free_dist)
{
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
if(start_seg != move_segment)
omp_set_lock(&move_segment->lock);
#endif
// Proceed only if the hop bitmap is unchanged since the snapshot.
if (start_hop_info == s->hopInfo[move_bucket])
{
// new_free_bucket -> free_bucket and empty new_free_bucket
HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
s->key[*free_bucket] = s->key[new_free_bucket];
s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// Bump the timestamp so lock-free readers can detect the relocation.
++move_segment->timestamp;
#pragma omp flush
#endif
s->hopInfo[move_bucket] |= (1U << move_free_dist);
s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);
*free_bucket = new_free_bucket;
*free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
return;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
}
++move_bucket;
}
// No bucket in range could be displaced: signal failure to the caller.
*free_bucket = -1;
*free_dist = 0;
}
/* Map variant of the hopscotch displacement: buckets are structs (pointer
 * arithmetic on m->table) rather than parallel arrays, and the bucket's
 * data field is moved along with key/hash.  Sets *free_bucket to NULL when
 * nothing within HOP_RANGE-1 buckets can be relocated. */
static inline void
hypre_UnorderedIntMapFindCloserFreeBucket( hypre_UnorderedIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *start_seg,
#endif
hypre_HopscotchBucket **free_bucket,
HYPRE_Int *free_dist)
{
// Earliest bucket whose hop neighborhood still covers *free_bucket.
hypre_HopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
HYPRE_Int move_free_dist;
for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
{
// Snapshot of the hop bitmap; re-validated under the segment lock below.
hypre_uint start_hop_info = move_bucket->hopInfo;
HYPRE_Int move_new_free_dist = -1;
hypre_uint mask = 1;
HYPRE_Int i;
for (i = 0; i < move_free_dist; ++i, mask <<= 1)
{
if (mask & start_hop_info)
{
move_new_free_dist = i;
break;
}
}
if (-1 != move_new_free_dist)
{
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// Segment index is derived from the bucket's offset in the table.
hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
if (start_seg != move_segment)
omp_set_lock(&move_segment->lock);
#endif
// Proceed only if the hop bitmap is unchanged since the snapshot.
if (start_hop_info == move_bucket->hopInfo)
{
// new_free_bucket -> free_bucket and empty new_free_bucket
hypre_HopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
(*free_bucket)->data = new_free_bucket->data;
(*free_bucket)->key = new_free_bucket->key;
(*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// Bump the timestamp so lock-free readers can detect the relocation.
++move_segment->timestamp;
#pragma omp flush
#endif
move_bucket->hopInfo |= (1U << move_free_dist);
move_bucket->hopInfo &= ~(1U << move_new_free_dist);
*free_bucket = new_free_bucket;
*free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
return;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
}
++move_bucket;
}
// No bucket in range could be displaced: signal failure to the caller.
*free_bucket = NULL;
*free_dist = 0;
}
/* Big-int-keyed variant of hypre_UnorderedIntMapFindCloserFreeBucket; see
 * that function for the displacement algorithm.  Sets *free_bucket to NULL
 * when nothing within HOP_RANGE-1 buckets can be relocated. */
static inline void
hypre_UnorderedBigIntMapFindCloserFreeBucket( hypre_UnorderedBigIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *start_seg,
#endif
hypre_BigHopscotchBucket **free_bucket,
HYPRE_Int *free_dist)
{
// Earliest bucket whose hop neighborhood still covers *free_bucket.
hypre_BigHopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
HYPRE_Int move_free_dist;
for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
{
// Snapshot of the hop bitmap; re-validated under the segment lock below.
hypre_uint start_hop_info = move_bucket->hopInfo;
HYPRE_Int move_new_free_dist = -1;
hypre_uint mask = 1;
HYPRE_Int i;
for (i = 0; i < move_free_dist; ++i, mask <<= 1)
{
if (mask & start_hop_info)
{
move_new_free_dist = i;
break;
}
}
if (-1 != move_new_free_dist)
{
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
if (start_seg != move_segment)
omp_set_lock(&move_segment->lock);
#endif
// Proceed only if the hop bitmap is unchanged since the snapshot.
if (start_hop_info == move_bucket->hopInfo)
{
// new_free_bucket -> free_bucket and empty new_free_bucket
hypre_BigHopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
(*free_bucket)->data = new_free_bucket->data;
(*free_bucket)->key = new_free_bucket->key;
(*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// Bump the timestamp so lock-free readers can detect the relocation.
++move_segment->timestamp;
#pragma omp flush
#endif
move_bucket->hopInfo |= (1U << move_free_dist);
move_bucket->hopInfo &= ~(1U << move_new_free_dist);
*free_bucket = new_free_bucket;
*free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
return;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
}
++move_bucket;
}
// No bucket in range could be displaced: signal failure to the caller.
*free_bucket = NULL;
*free_dist = 0;
}
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s );
void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s );
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m );
void hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m );
// Query Operations .........................................................
/* Lock-free membership test: returns 1 if key is in the set, 0 otherwise.
 * Readers take no lock; instead the segment timestamp is used to detect a
 * concurrent displacement and fall back to a full hop-range scan. */
static inline HYPRE_Int
hypre_UnorderedIntSetContains( hypre_UnorderedIntSet *s,
HYPRE_Int key )
{
//CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
HYPRE_Int hash = hypre_BigHash(key);
#else
HYPRE_Int hash = hypre_Hash(key);
#endif
//CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
#endif
HYPRE_Int bucket = hash & s->bucketMask;
hypre_uint hopInfo = s->hopInfo[bucket];
// Fast paths: empty neighborhood, or a single entry in the home bucket.
if (0 == hopInfo)
return 0;
else if (1 == hopInfo )
{
if (hash == s->hash[bucket] && key == s->key[bucket])
return 1;
else return 0;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// Remember the segment timestamp to detect concurrent displacements.
HYPRE_Int startTimestamp = segment->timestamp;
#endif
// Probe only the occupied slots recorded in the hop bitmap.
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
HYPRE_Int currElm = bucket + i;
if (hash == s->hash[currElm] && key == s->key[currElm])
return 1;
hopInfo &= ~(1U << i);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// If no relocation happened while we probed, the miss is definitive.
if (segment->timestamp == startTimestamp)
return 0;
#endif
// A displacement raced with us: rescan the entire hop neighborhood.
HYPRE_Int i;
for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
{
if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
return 1;
}
return 0;
}
/* Big-int-keyed variant of hypre_UnorderedIntSetContains; same lock-free
 * read protocol (timestamp check, full hop-range rescan on a race). */
static inline HYPRE_Int
hypre_UnorderedBigIntSetContains( hypre_UnorderedBigIntSet *s,
HYPRE_BigInt key )
{
//CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
HYPRE_BigInt hash = hypre_BigHash(key);
#else
HYPRE_BigInt hash = hypre_Hash(key);
#endif
//CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *segment = &s->segments[(HYPRE_Int)(hash & s->segmentMask)];
#endif
HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask);
hypre_uint hopInfo = s->hopInfo[bucket];
// Fast paths: empty neighborhood, or a single entry in the home bucket.
if (0 == hopInfo)
return 0;
else if (1 == hopInfo )
{
if (hash == s->hash[bucket] && key == s->key[bucket])
return 1;
else return 0;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// Remember the segment timestamp to detect concurrent displacements.
HYPRE_Int startTimestamp = segment->timestamp;
#endif
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
HYPRE_Int currElm = bucket + i;
if (hash == s->hash[currElm] && key == s->key[currElm])
return 1;
hopInfo &= ~(1U << i);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// If no relocation happened while we probed, the miss is definitive.
if (segment->timestamp == startTimestamp)
return 0;
#endif
// A displacement raced with us: rescan the entire hop neighborhood.
HYPRE_Int i;
for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
{
if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
return 1;
}
return 0;
}
/**
 * Lock-free lookup: returns the data associated with key.
 * @ret -1 if key doesn't exist (so -1 cannot be stored as a valid value)
 *
 * Same read protocol as hypre_UnorderedIntSetContains: probe the slots in
 * the hop bitmap without locking, then use the segment timestamp to detect
 * a concurrent displacement and fall back to scanning the whole hop range.
 */
static inline HYPRE_Int
hypre_UnorderedIntMapGet( hypre_UnorderedIntMap *m,
HYPRE_Int key )
{
//CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
HYPRE_Int hash = hypre_BigHash(key);
#else
HYPRE_Int hash = hypre_Hash(key);
#endif
//CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
#endif
hypre_HopscotchBucket *elmAry = &(m->table[hash & m->bucketMask]);
hypre_uint hopInfo = elmAry->hopInfo;
// Fast paths: empty neighborhood, or a single entry in the home bucket.
if (0 == hopInfo)
return -1;
else if (1 == hopInfo )
{
if (hash == elmAry->hash && key == elmAry->key)
return elmAry->data;
else return -1;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// Remember the segment timestamp to detect concurrent displacements.
HYPRE_Int startTimestamp = segment->timestamp;
#endif
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
hypre_HopscotchBucket* currElm = elmAry + i;
if (hash == currElm->hash && key == currElm->key)
return currElm->data;
hopInfo &= ~(1U << i);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// If no relocation happened while we probed, the miss is definitive.
if (segment->timestamp == startTimestamp)
return -1;
#endif
// A displacement raced with us: rescan the entire hop neighborhood.
hypre_HopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]);
HYPRE_Int i;
for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
{
if (hash == currBucket->hash && key == currBucket->key)
return currBucket->data;
}
return -1;
}
/**
 * Lock-free lookup in the big-int map: returns the data associated with key.
 * Returns -1 if key doesn't exist (so -1 cannot be stored as a valid value).
 * Same read protocol as hypre_UnorderedIntMapGet.
 */
static inline
HYPRE_Int hypre_UnorderedBigIntMapGet( hypre_UnorderedBigIntMap *m,
HYPRE_BigInt key )
{
//CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
HYPRE_BigInt hash = hypre_BigHash(key);
#else
HYPRE_BigInt hash = hypre_Hash(key);
#endif
//CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *segment = &m->segments[(HYPRE_Int)(hash & m->segmentMask)];
#endif
hypre_BigHopscotchBucket *elmAry = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]);
hypre_uint hopInfo = elmAry->hopInfo;
// Fast paths: empty neighborhood, or a single entry in the home bucket.
if (0 == hopInfo)
return -1;
else if (1 == hopInfo )
{
if (hash == elmAry->hash && key == elmAry->key)
return elmAry->data;
else return -1;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// Remember the segment timestamp to detect concurrent displacements.
HYPRE_Int startTimestamp = segment->timestamp;
#endif
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
hypre_BigHopscotchBucket* currElm = elmAry + i;
if (hash == currElm->hash && key == currElm->key)
return currElm->data;
hopInfo &= ~(1U << i);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
// If no relocation happened while we probed, the miss is definitive.
if (segment->timestamp == startTimestamp)
return -1;
#endif
// A displacement raced with us: rescan the entire hop neighborhood.
// (cast added for consistency with the table index above and with the
// other BigInt routines, which index with (HYPRE_Int))
hypre_BigHopscotchBucket *currBucket = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]);
HYPRE_Int i;
for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
{
if (hash == currBucket->hash && key == currBucket->key)
return currBucket->data;
}
return -1;
}
//status Operations .........................................................
/* Number of occupied slots in the set.  Walks the whole table (bucketMask+1
 * buckets plus the INSERT_RANGE overflow area); not synchronized against
 * concurrent insertion. */
static inline
HYPRE_Int hypre_UnorderedIntSetSize( hypre_UnorderedIntSet *s )
{
   HYPRE_Int nbucket = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int occupied = 0;
   HYPRE_Int b;
   for (b = 0; b < nbucket; ++b)
   {
      occupied += (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[b]);
   }
   return occupied;
}
/* Number of occupied slots in the big-int set.  Walks the whole table; not
 * synchronized against concurrent insertion. */
static inline
HYPRE_Int hypre_UnorderedBigIntSetSize( hypre_UnorderedBigIntSet *s )
{
   HYPRE_Int counter = 0;
   HYPRE_BigInt n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   /* i widened from HYPRE_Int to HYPRE_BigInt to match the bound n: with
      mixed-int builds a table larger than HYPRE_Int max would otherwise
      overflow the loop counter. */
   HYPRE_BigInt i;
   for (i = 0; i < n; ++i)
   {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i])
      {
         ++counter;
      }
   }
   return counter;
}
/* Number of occupied buckets in the map.  Walks the whole table; not
 * synchronized against concurrent insertion. */
static inline HYPRE_Int
hypre_UnorderedIntMapSize( hypre_UnorderedIntMap *m )
{
   HYPRE_Int nbucket = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int occupied = 0;
   HYPRE_Int b;
   for (b = 0; b < nbucket; ++b)
   {
      occupied += (HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[b].hash);
   }
   return occupied;
}
/* Number of occupied buckets in the big-int map.  Walks the whole table;
 * not synchronized against concurrent insertion. */
static inline HYPRE_Int
hypre_UnorderedBigIntMapSize( hypre_UnorderedBigIntMap *m )
{
   HYPRE_Int nbucket = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int occupied = 0;
   HYPRE_Int b;
   for (b = 0; b < nbucket; ++b)
   {
      occupied += (HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[b].hash);
   }
   return occupied;
}
HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len );
HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len );
//modification Operations ...................................................
/* Insert key into the set (no-op if already present).  The inserter holds
 * the segment lock of the key's home bucket; a free slot is claimed with a
 * CAS on the hash array, then moved into the home bucket's hop range by
 * repeated displacement.  Aborts the program if no free slot can be brought
 * within range (resize is not implemented). */
static inline void
hypre_UnorderedIntSetPut( hypre_UnorderedIntSet *s,
HYPRE_Int key )
{
//CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
HYPRE_Int hash = hypre_BigHash(key);
#else
HYPRE_Int hash = hypre_Hash(key);
#endif
//LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
omp_set_lock(&segment->lock);
#endif
HYPRE_Int bucket = hash&s->bucketMask;
//CHECK IF ALREADY CONTAIN ................
hypre_uint hopInfo = s->hopInfo[bucket];
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
HYPRE_Int currElm = bucket + i;
if(hash == s->hash[currElm] && key == s->key[currElm])
{
// Already present: nothing to do.
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
// Linear scan from the home bucket; a slot is claimed atomically by
// CASing its hash from EMPTY to BUSY so other inserters skip it.
HYPRE_Int free_bucket = bucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
if( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
(HYPRE_HOPSCOTCH_HASH_EMPTY ==
hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
break;
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
// The free slot is within hop range of the home bucket: publish the
// key and record it in the home bucket's hop bitmap.
s->key[free_bucket] = key;
s->hash[free_bucket] = hash;
s->hopInfo[bucket] |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return;
}
// Too far away: displace an intermediate entry to move the free slot
// closer (sets free_bucket to -1 if impossible).
hypre_UnorderedIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
segment,
#endif
&free_bucket, &free_dist);
} while (-1 != free_bucket);
}
//NEED TO RESIZE ..........................
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return;
}
/* Big-int-keyed variant of hypre_UnorderedIntSetPut; same locked-insert /
 * CAS-claim / displacement protocol.  Aborts the program if no free slot
 * can be brought within hop range (resize is not implemented). */
static inline void
hypre_UnorderedBigIntSetPut( hypre_UnorderedBigIntSet *s,
HYPRE_BigInt key )
{
//CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
HYPRE_BigInt hash = hypre_BigHash(key);
#else
HYPRE_BigInt hash = hypre_Hash(key);
#endif
//LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
omp_set_lock(&segment->lock);
#endif
HYPRE_Int bucket = (HYPRE_Int)(hash&s->bucketMask);
//CHECK IF ALREADY CONTAIN ................
hypre_uint hopInfo = s->hopInfo[bucket];
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
HYPRE_Int currElm = bucket + i;
if(hash == s->hash[currElm] && key == s->key[currElm])
{
// Already present: nothing to do.
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
HYPRE_Int free_bucket = bucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
// NOTE(review): the (HYPRE_Int *) cast assumes s->hash elements have
// HYPRE_Int width; verify this CAS for builds where the hash array
// element is wider (e.g. HYPRE_MIXEDINT) — TODO confirm.
if( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
(HYPRE_HOPSCOTCH_HASH_EMPTY ==
hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
break;
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
// Free slot within hop range: publish the key and record it in the
// home bucket's hop bitmap.
s->key[free_bucket] = key;
s->hash[free_bucket] = hash;
s->hopInfo[bucket] |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return;
}
// Too far away: displace an intermediate entry to move the free slot
// closer (sets free_bucket to -1 if impossible).
hypre_UnorderedBigIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
segment,
#endif
&free_bucket, &free_dist);
} while (-1 != free_bucket);
}
//NEED TO RESIZE ..........................
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return;
}
/* Insert (key, data) if key is absent.  Returns the existing data when the
 * key is already present, otherwise inserts and returns
 * HYPRE_HOPSCOTCH_HASH_EMPTY.  Same locked-insert / CAS-claim /
 * displacement protocol as hypre_UnorderedIntSetPut; aborts the program if
 * no free bucket can be brought within hop range (resize not implemented). */
static inline HYPRE_Int
hypre_UnorderedIntMapPutIfAbsent( hypre_UnorderedIntMap *m,
HYPRE_Int key, HYPRE_Int data )
{
//CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
HYPRE_Int hash = hypre_BigHash(key);
#else
HYPRE_Int hash = hypre_Hash(key);
#endif
//LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
omp_set_lock(&segment->lock);
#endif
hypre_HopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);
//CHECK IF ALREADY CONTAIN ................
hypre_uint hopInfo = startBucket->hopInfo;
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
hypre_HopscotchBucket* currElm = startBucket + i;
if (hash == currElm->hash && key == currElm->key)
{
// Key already present: return the existing data unchanged.
HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return rc;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
// Linear scan from the home bucket; a bucket is claimed atomically by
// CASing its hash from EMPTY to BUSY so other inserters skip it.
hypre_HopscotchBucket* free_bucket = startBucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
if( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
(HYPRE_HOPSCOTCH_HASH_EMPTY ==
hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
break;
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
// Free bucket within hop range: publish the entry and record it in
// the home bucket's hop bitmap.
free_bucket->data = data;
free_bucket->key = key;
free_bucket->hash = hash;
startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
// Too far away: displace an intermediate entry to move the free
// bucket closer (sets free_bucket to NULL if impossible).
hypre_UnorderedIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
segment,
#endif
&free_bucket, &free_dist);
} while (NULL != free_bucket);
}
//NEED TO RESIZE ..........................
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
/* Big-int-keyed variant of hypre_UnorderedIntMapPutIfAbsent.  Returns the
 * existing data when key is already present, otherwise inserts (key, data)
 * and returns HYPRE_HOPSCOTCH_HASH_EMPTY.  Aborts the program if no free
 * bucket can be brought within hop range (resize not implemented). */
static inline HYPRE_Int
hypre_UnorderedBigIntMapPutIfAbsent( hypre_UnorderedBigIntMap *m,
HYPRE_BigInt key, HYPRE_Int data)
{
//CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
HYPRE_BigInt hash = hypre_BigHash(key);
#else
HYPRE_BigInt hash = hypre_Hash(key);
#endif
//LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
omp_set_lock(&segment->lock);
#endif
hypre_BigHopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);
//CHECK IF ALREADY CONTAIN ................
hypre_uint hopInfo = startBucket->hopInfo;
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
hypre_BigHopscotchBucket* currElm = startBucket + i;
if (hash == currElm->hash && key == currElm->key)
{
// Key already present: return the existing data unchanged.
HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return rc;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
hypre_BigHopscotchBucket* free_bucket = startBucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
// NOTE(review): the (HYPRE_Int *) cast assumes the bucket's hash field
// has HYPRE_Int width; verify this CAS for builds where that field is
// wider (e.g. HYPRE_MIXEDINT) — TODO confirm.
if( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
(HYPRE_HOPSCOTCH_HASH_EMPTY ==
hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
(HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
break;
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
// Free bucket within hop range: publish the entry and record it in
// the home bucket's hop bitmap.
free_bucket->data = data;
free_bucket->key = key;
free_bucket->hash = hash;
startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
omp_unset_lock(&segment->lock);
#endif
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
// Too far away: displace an intermediate entry to move the free
// bucket closer (sets free_bucket to NULL if impossible).
hypre_UnorderedBigIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
segment,
#endif
&free_bucket, &free_dist);
} while (NULL != free_bucket);
}
//NEED TO RESIZE ..........................
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
#ifdef __cplusplus
} // extern "C"
#endif
#endif // hypre_HOPSCOTCH_HASH_HEADER
|
GB_binop__bshift_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int32)
// C=scalar+B GB (_bind1st__bshift_int32)
// C=scalar+B' GB (_bind1st_tran__bshift_int32)
// C=A+scalar GB (_bind2nd__bshift_int32)
// C=A'+scalar GB (_bind2nd_tran__bshift_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_int32 (aij, bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_int32 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT32 || GxB_NO_BSHIFT_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.  Auto-generated wrapper: the
// actual loop is the included template, specialized by the GB_* macros above
// to cij = GB_bitshift_int32 (aij, bij).
void GB (_Cdense_ewise3_noaccum__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// Auto-generated wrapper around the subassign template; returns GrB_NO_VALUE
// when this operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as untyped GB_void) into a dense
// matrix C.  Auto-generated wrapper; returns GrB_NO_VALUE when this
// operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__bshift_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// generated boilerplate: unreachable (the block above always returns)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B.  Auto-generated wrapper:
// the add logic lives in GB_add_template.c, specialized by the GB_* macros
// above.  For eWiseUnion, alpha/beta scalars replace missing entries of A/B.
GrB_Info GB (_AaddB__bshift_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// typed copies of the eWiseUnion fill scalars (unused otherwise)
int32_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is
// sparse/hypersparse.  Auto-generated wrapper around GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bshift_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hypersparse and B is
// bitmap/full.  Auto-generated wrapper; GB_BINOP_FLIP is 1 for BSHIFT (the
// operator is not commutative and has no pre-flipped variant), so flipxy
// selects between fmult(x,y) and fmult(y,x) at compile time via GB_FLIPPED.
GrB_Info GB (_AemultB_02__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hypersparse and both A and
// B bitmap/full.  Auto-generated wrapper around GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B for bshift_int32 where the
// result C is held in bitmap form; dispatch on ewise_method happens inside
// GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bshift_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the bshift_int32 operator with the scalar x bound
// as the first argument. For each entry p present in B (per the bitmap Bb),
// Cx [p] = GB_bitshift_int32 (x, Bx [p]), where B holds the int8 shift
// counts and x is the int32 value being shifted.
GrB_Info GB (_bind1st__bshift_int32)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    const int32_t x = (*((int32_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < bnz ; k++)
    {
        // skip positions not present in the bitmap of B
        if (GBB (Bb, k))
        {
            int8_t shift = GBX (Bx, k, false) ;
            Cx [k] = GB_bitshift_int32 (x, shift) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the bshift_int32 operator with the scalar y bound
// as the second argument. For each entry p present in A (per the bitmap
// Ab), Cx [p] = GB_bitshift_int32 (Ax [p], y), where y is the int8 shift
// count applied to every int32 value of A.
GrB_Info GB (_bind2nd__bshift_int32)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    const int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // skip positions not present in the bitmap of A
        if (GBB (Ab, k))
        {
            int32_t value = GBX (Ax, k, false) ;
            Cx [k] = GB_bitshift_int32 (value, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP: cij = bitshift (x, aij). Here aij (from A) is the int8 shift
// count and the bound scalar x is the int32 value being shifted.
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int32 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound as the
// first argument; the transpose itself is done by GB_unop_transpose.c using
// the Workspaces/A_slice task decomposition.
GrB_Info GB (_bind1st_tran__bshift_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to the type of A (int32_t) for the rest of this file
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP: cij = bitshift (aij, y). Here aij (from A) is the int32 value
// and the bound scalar y is the int8 shift count.
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int32 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound as the
// second argument; the work is done by GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pair_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_int8)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int8_t
// A type: int8_t
// A pattern? 1
// B type: int8_t
// B pattern? 1
// BinaryOp: cij = 1
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_INT8 || GxB_NO_PAIR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator. For the PAIR
// operator (GB_BINOP sets z = 1) every entry of C is written as 1 by the
// included template.
void GB (_Cdense_ewise3_noaccum__pair_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: accumulate the entries of B into
// C with the PAIR operator (each touched cij becomes 1), parallelized over
// the precomputed B_ek_slicing task decomposition.
GrB_Info GB (_Cdense_accumB__pair_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the PAIR operator.
// The scalar is passed type-erased via p_bwork and unpacked as int8_t; the
// actual update loop is GB_dense_subassign_22_template.c.
GrB_Info GB (_Cdense_accumb__pair_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
        // the generated code carried a second, unreachable
        // "return (GrB_SUCCESS) ;" after this block; it has been removed
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked with M, structural and/or
// complemented). The numeric work is in GB_add_template.c; the GB_WERK
// workspaces hold per-matrix ek_slicing task decompositions and are freed
// by GB_FREE_WORKSPACE. For eWiseUnion, alpha/beta substitute for entries
// missing from A and B respectively.
GrB_Info GB (_AaddB__pair_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
// the scalar inputs are only dereferenced for eWiseUnion; for plain
// eWiseAdd they may be NULL
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
_grave.c |
//PYHESAFF void detectKeypointsList(int num_fpaths,
// char** image_fpath_list,
// float** keypoints_array,
// uint8** descriptors_array,
// int* length_array,
// __HESAFF_PARAM_SIGNATURE_ARGS__
// )
//{
// assert(0); // do not use
// // Maybe use this implimentation instead to be more similar to the way
// // pyhesaff calls this library?
// int index;
// #pragma omp parallel for private(index)
// for(index = 0; index < num_fpaths; ++index)
// {
// char* image_filename = image_fpath_list[index];
// AffineHessianDetector* detector =
// new_hesaff_fpath(image_filename, __HESAFF_PARAM_CALL_ARGS__);
// detector->DBG_params();
// int length = detector->detect();
// length_array[index] = length;
// // TODO: shouldn't python be doing this allocation?
// keypoints_array[index] = new float[length * KPTS_DIM];
// descriptors_array[index] = new uint8[length * DESC_DIM];
// exportArrays(detector, length, keypoints_array[index], descriptors_array[index]);
// delete detector;
// }
//}
//void detectKeypoints(char* image_filename,
// float** keypoints,
// uint8** descriptors,
// int* length,
// __HESAFF_PARAM_SIGNATURE_ARGS__
// )
//{
// AffineHessianDetector* detector = new_hesaff_fpath(image_filename, __HESAFF_PARAM_CALL_ARGS__);
// detector->DBG_params();
// *length = detector->detect();
// // TODO: shouldn't python be doing this allocation?
// *keypoints = new float[(*length)*KPTS_DIM];
// *descriptors = new uint8[(*length)*DESC_DIM];
// detector->exportArrays((*length), *keypoints, *descriptors);
// //may need to add support for "use_adaptive_scale" and "nogravity_hack" here (needs translation from Python to C++ first)
// delete detector;
//}
//typedef void*(*allocer_t)(int, int*);
//const PYHESAFF char* cmake_build_type()
//{
// // References:
// // http://stackoverflow.com/questions/14883853/ctypes-return-a-string-from-c-function
// char *build_type = (char*) malloc(sizeof(char) * (10 + 1));
// #ifdef CMAKE_BUILD_TYPE
// //char hello[] = CMAKE_BUILD_TYPE
// strcpy(build_type, "testb1");
// #else
// strcpy(build_type, "testb2");
// #endif
// return build_type;
//}
//PYHESAFF char* free_char(char* malloced_char)
//{
// // need to free anything malloced here
// free(malloced_char);
//}
|
compressor_zfp.h | /**
* Copyright 2019 The Gamma Authors.
*
* This source code is licensed under the Apache License, Version 2.0 license
* found in the LICENSE file in the root directory of this source tree.
*/
#pragma once
#ifdef WITH_ZFP
#include <limits.h>
#include <math.h>
#include <omp.h>
#include "compressor.h"
#include "util/log.h"
#include "zfp.h"
namespace tig_gamma {
// Fixed-rate ZFP compressor for 1-D float vectors of dimension `dims`.
// Compress/Decompress handle one vector; CompressBatch/DecompressBatch
// split `n` vectors across OpenMP threads. Batch calls return
// n * zfpsize on success and 0 if any vector failed.
class CompressorZFP : public Compressor {
 public:
  CompressorZFP(CompressType type) : Compressor(type) {
    LOG(INFO) << "CompressorZFP construction!";
  }

  ~CompressorZFP() { LOG(INFO) << "CompressorZFP destroyed successfully!"; }

  // Initialize for dimension d, target rate r (bits per value), and t
  // threads (0 = pick omp_get_max_threads() lazily on first batch call).
  void Init(int d, double r = DEFAULT_RATE, int t = 0) {
    dims = d;
    threads = t;
    int n = 4;  // ZFP 1-D block size (4 values per block)
    // NOTE(review): empirical correction subtracted from the per-vector
    // stream size; confirm against the zfp header/stream layout.
    int remain = d % 4 == 0 ? 24 : 16;
    int blocks = floor((dims + n - 1) / n);
    int bits = floor(n * r + 0.5);
    bits = bits > 9 ? bits : 9;  // enforce a minimum bit budget per block
    rate = (double)bits / n;
    // bytes per compressed vector: header + block bits, rounded up to a
    // whole number of stream words, minus the `remain` correction
    zfpsize = ((ZFP_HEADER_MAX_BITS + blocks * bits + stream_word_bits - 1) &
               ~(stream_word_bits - 1)) /
                  CHAR_BIT -
              remain;
    raw_len = n * d;
  }

  size_t GetCompressLen(int data_len = 0) { return zfpsize; }

  int GetRawLen() { return raw_len; }

  // Compress one vector from `data` into `output` (zfpsize bytes).
  // Returns the compressed size in bytes, or 0 on failure.
  size_t Compress(char *data, char *output, int data_len) {
    zfp_field *field = zfp_field_1d(data, type, dims);
    zfp_stream *zfp = zfp_stream_open(NULL);
    zfp_stream_set_rate(zfp, rate, type, 1, 0);
    bitstream *b_stream = stream_open(output, zfpsize);
    zfp_stream_set_bit_stream(zfp, b_stream);
    // zfp_stream_rewind(zfp);
    size_t size = (size_t)zfp_compress(zfp, field);
    zfp_field_free(field);
    zfp_stream_close(zfp);
    stream_close(b_stream);
    return size;
  }

  // Compress n vectors in parallel, chunked evenly across `threads`.
  size_t CompressBatch(char *datum, char *output, int n, int data_len) {
    size_t flag = n * zfpsize;
    if (!threads) threads = omp_get_max_threads();
    int chunks = (n + threads - 1) / threads;
#pragma omp parallel for num_threads(threads)
    for (int i = 0; i < threads; i++) {
      for (int j = 0; j < chunks; j++) {
        int idx = j + i * chunks;
        if (idx > n - 1) break;
        // BUGFIX: `size` was previously declared outside the parallel
        // region and shared by all threads -- a data race; it must be
        // thread-local (and size_t, matching Compress's return type).
        size_t size = Compress(datum + sizeof(float) * dims * idx,
                               output + zfpsize * idx, 0);
        if (size == 0) {
          // every failing thread writes the same value, but use an
          // atomic write to avoid a formal data race on `flag`
#pragma omp atomic write
          flag = 0;
        }
      }
    }
    return flag;
  }

  // Decompress one vector from `data` (zfpsize bytes) into `output`.
  // Returns the number of compressed bytes consumed, or 0 on failure.
  size_t Decompress(char *data, char *output, int data_len) {
    zfp_field *field = zfp_field_1d(output, type, dims);
    zfp_stream *zfp = zfp_stream_open(NULL);
    zfp_stream_set_rate(zfp, rate, type, 1, 0);
    /* zfp_stream_set_execution(zfp, zfp_exec_omp); */
    /* zfp_stream_set_reversible(zfp); */
    zfp_field_set_pointer(field, output);
    bitstream *b_stream = stream_open(data, zfpsize);
    zfp_stream_set_bit_stream(zfp, b_stream);
    // zfp_stream_rewind(zfp);
    size_t size = (size_t)zfp_decompress(zfp, field);
    zfp_field_free(field);
    zfp_stream_close(zfp);
    stream_close(b_stream);
    return size;
  }

  // Decompress n vectors in parallel; mirrors CompressBatch.
  size_t DecompressBatch(char *datum, char *output, int n, int data_len) {
    size_t flag = n * zfpsize;
    if (!threads) threads = omp_get_max_threads();
    int chunks = (n + threads - 1) / threads;
#pragma omp parallel for num_threads(threads)
    for (int i = 0; i < threads; i++) {
      for (int j = 0; j < chunks; j++) {
        int idx = j + i * chunks;
        if (idx > n - 1) break;
        // BUGFIX: thread-local `size` (was a shared int -- data race)
        size_t size = Decompress(datum + zfpsize * idx,
                                 output + sizeof(float) * dims * idx, 0);
        if (size == 0) {
#pragma omp atomic write
          flag = 0;
        }
      }
    }
    return flag;
  }

 private:
  int dims;     // the dims of 1D_array
  double rate;  // the rate of compress, default is 16
  int threads;  // worker threads for batch calls (0 until first use)
  size_t zfpsize;  // compressed bytes per vector
  int raw_len;     // uncompressed length in bytes (4 * dims)
  zfp_type type = zfp_type_float;
};
} // namespace tig_gamma
#endif // WITH_ZFP
|
valid.yolo8.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_256_34_34_512_1_1.h"
#include "gen_ukr_A4B2gemm_1_256_34_34_512_1_1.h"
// Auto-generated ("push button") tiled convolution/GEMM test kernel,
// specialized for a 1 x 256 x 34 x 34 x 512 problem. Phase 1: each thread
// repacks oriB into B in a 16-column interleaved panel layout using 8x8 AVX
// transposes. Phase 2 (after the barrier): a fully tiled loop nest calls
// the 6x2v / 4x2v scatter micro-kernels.
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
int Nx = 34;
int Ny = 34;
int Nh = 1;
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
// pack B: two 8x8 transposes per 16-wide filter panel (fpck) and 8-deep
// channel*spatial slice (cwh). uNf/uNc/uNw/uNh come from the generated
// ukr headers -- presumably 256/512/... TODO confirm.
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
#pragma omp barrier// begin push button generated block
// Tiled loop nest over channels (c*), filters (f*), and flattened spatial
// positions (xy*, 34*34 = 1156). Outer levels are degenerate single-trip
// loops; Tc1/Txy3/Tf2 are tile-size constants from the generated headers.
for(int c5=0;c5<512+0;c5+=512)
{
for(int f5=0;f5<256+0;f5+=256)
{
for(int xy5=0;xy5<1156+0;xy5+=1156)
{
for(int c4=c5;c4<min(512, 512+c5);c4+=512)
{
for(int f4=f5;f4<min(256, 256+f5);f4+=256)
{
for(int xy4=xy5;xy4<min(1156, 1156+xy5);xy4+=1156)
{
for(int c3=c4;c3<min(512, 512+c4);c3+=Tc1)
{
for(int xy3=xy4;xy3<min(1156, 1156+xy4);xy3+=Txy3)
{
for(int f3=f4;f3<min(256, 256+f4);f3+=Tf2)
{
for(int xy2=xy3;xy2<min(1156,Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(256, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(512, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(512, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(1156, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(256, 16+f2);f1+=16)
{
// decompose the flattened indices into row/col and panel offsets
int ctile=min(Tc1, 512-c1);
int x1=xy1/34;
int y1=xy1%34/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*591872+c1_1*1156+1*x1*34+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*8192+c1*16+0*16+0*16+kf1_2*1;
int offsetC=0+b1*295936+of1_1*1156+x1*34+y1*1+of1_2*1;
// micro-kernel dispatch: full 6-row kernel when the tile fits
// within the current image row
if(34-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(34*34-xy1>=6){
// tile wraps across a row boundary: adjust the per-row strides,
// run the 6-row kernel, then restore them.
// NOTE(review): the +=0 / -=0 adjustments look like placeholders
// emitted by the generator for a unit-stride layout -- confirm.
for(int sti=34-y1;sti<6;sti+=1)
{
Astrides[sti]+=0;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=34-y1;sti<6;sti+=1)
{
Astrides[sti]-=0;
}
}
else{
// tail of the image: fall back to the 4-row kernel
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
disk.h | #pragma once
// SolidDisk: static-only helper that generates the initial solid
// (planetesimal) disk for the N-body particle system. The surface density
// follows Sigma ~ a^-p with an eta_ice enhancement beyond the ice line
// (Hayashi-type: 10*f_dust g/cm^2 at 1 AU, converted to code units).
class SolidDisk{
public:
static PS::S32 n_init; // particle count (0 = derive from m_init)
static PS::F64 m_init; // particle mass (0 = derive from n_init)
static PS::F64 p; // surface-density power-law index
//static PS::F64 f_in;
//static PS::F64 f_out;
static PS::F64 f_dust; // solid surface-density scaling factor
static PS::F64 eta_ice; // density enhancement beyond the ice line
static PS::F64 a_in; // inner disk edge
static PS::F64 a_out; // outer disk edge
static PS::F64 a_ice; // ice-line radius
static PS::F64 ecc_hill; // rms eccentricity in reduced-Hill units
static PS::F64 inc_hill; // rms inclination in reduced-Hill units
// Total solid mass between semimajor axes a0 and a1, integrating
// Sigma(a) = coef * a^-p: M = 2*pi*coef/(2.-p) * (a1^(2-p) - a0^(2-p)).
// The 10*f_dust coefficient is in g/cm^2 and is converted to code units
// via M_CGS/L_CGS. inIce == true selects the region inside the ice line
// (no eta_ice enhancement) -- see the call sites below.
static PS::F64 calcDustMass(const PS::F64 a0,
const PS::F64 a1,
const bool inIce) {
const PS::F64 L_CGS = 14959787070000; // 1 AU in cm
const PS::F64 M_CGS = 1.9884e33; // solar mass in g
if ( a1 < a0 ) return 0.;
if ( inIce ) {
const PS::F64 coef_in = 10. * f_dust /M_CGS*L_CGS*L_CGS;
return 2.*M_PI*coef_in/(2.-p) * ( pow(a1, 2.-p) - pow(a0, 2.-p) );
} else {
// beyond the ice line the surface density is enhanced by eta_ice
const PS::F64 coef_out = 10. * f_dust * eta_ice /M_CGS*L_CGS*L_CGS;
return 2.*M_PI*coef_out/(2.-p) * ( pow(a1, 2.-p) - pow(a0, 2.-p) );
}
}
// Draw a random semimajor axis in [a0, a1] by inverse-transform sampling
// of the cumulative mass profile ~ a^(2-p) (log-uniform when p == 2).
static PS::F64 getSemimajorAxis(const PS::F64 a0,
const PS::F64 a1) {
assert ( a0 < a1 );
PS::F64 R = drand48();
if ( p != 2 ){
return pow( (pow(a1,2.-p) - pow(a0,2.-p)) * R + pow(a0,2.-p), 1./(2.-p) );
} else {
return exp( (log(a1) - log(a0)) * R + log(a0) );
}
}
// Build the initial particle system on MPI rank 0 only (other ranks get
// zero local particles): resolve n_init/m_init from the disk mass, then
// sample orbital elements per particle and convert them to Cartesian
// position/velocity via orbitalElement2PosVel.
template <class Tpsys>
static void createInitialCondition(Tpsys & pp){
if ( PS::Comm::getRank() == 0 ){
const PS::F64 m_sun = FPGrav::m_sun;
PS::F64 m_in = 0.;
PS::F64 m_out = 0.;
PS::S32 n_in = 0;
//PS::S32 n_out = 0;
////////////////////////////////////
/* Set Particle Mass & Number */
////////////////////////////////////
// split the disk mass at the ice line, handling disks that lie
// entirely inside or outside it
if ( a_out < a_ice ) {
m_in = calcDustMass(a_in, a_out, true);
m_out = 0.;
} else if ( a_ice < a_in ) {
m_in = 0.;
m_out = calcDustMass(a_in, a_out, false);
} else {
m_in = calcDustMass(a_in, a_ice, true);
m_out = calcDustMass(a_ice, a_out, false);
}
assert( n_init >= 0 );
assert( m_init >= 0. );
// exactly one of n_init/m_init may be zero; derive the other from
// the total disk mass
if ( m_init == 0. ) {
assert( n_init > 0 );
m_init = (m_in + m_out) / n_init;
}
if ( n_init == 0 ){
assert( m_init > 0. );
n_init = (m_in + m_out) / m_init;
}
n_in = (PS::S32)round(m_in/(m_in + m_out) * n_init);
//n_out = n_init - n_in;
////////////////////////////////
/* Create Particle System */
////////////////////////////////
pp.setNumberOfParticleLocal(n_init);
for ( PS::S32 i=0; i<n_init; i++ ){
pp[i].id = i;
pp[i].mass = m_init;
// set orbital element
PS::F64 ax;
// reduced Hill factor (m/3M)^(1/3), scales e and i below
PS::F64 h = pow(pp[i].mass/(3.*m_sun), 1./3.);
if ( a_out < a_ice || a_ice < a_in ) {
ax = getSemimajorAxis(a_in, a_out);
} else {
// the first n_in particles populate the inner (ice-free) region
if ( i < n_in ) {
ax = getSemimajorAxis(a_in, a_ice);
} else {
ax = getSemimajorAxis(a_ice, a_out);
}
}
// e and i drawn via getGaussian scaled by the Hill factor;
// orbital angles uniform in [0, 2*pi)
PS::F64 ecc = getGaussian(ecc_hill*h);
PS::F64 inc = getGaussian(inc_hill*h);
PS::F64 l = 2 * M_PI * drand48(); // mean anomaly
PS::F64 u = solveKeplerEq(l, ecc); // eccentric anomaly
PS::F64 omg = 2 * M_PI * drand48(); // argument of pericenter
PS::F64 OMG = 2 * M_PI * drand48(); // longitude of ascending node
PS::F64 n = sqrt(m_sun / (ax*ax*ax)); // mean motion
// P, Q: orbital-plane basis vectors built from the three angles
PS::F64vec P, Q;
P.x = cos(omg)*cos(OMG) - sin(omg)*sin(OMG)*cos(inc);
P.y = cos(omg)*sin(OMG) + sin(omg)*cos(OMG)*cos(inc);
P.z = sin(omg)*sin(inc);
Q.x = -sin(omg)*cos(OMG) - cos(omg)*sin(OMG)*cos(inc);
Q.y = -sin(omg)*sin(OMG) + cos(omg)*cos(OMG)*cos(inc);
Q.z = cos(omg)*sin(inc);
orbitalElement2PosVel(pp[i].pos, pp[i].vel, m_sun,
ax, ecc, n, u, P, Q);
}
} else {
pp.setNumberOfParticleLocal(0);
}
}
};
// Definitions of the SolidDisk static members (defaults; presumably
// overridden by an input/parameter file -- TODO confirm).
PS::S32 SolidDisk::n_init = 0;
PS::F64 SolidDisk::m_init = 0.;
PS::F64 SolidDisk::p = 1.5; // surface-density power-law index
PS::F64 SolidDisk::f_dust = 0.71; // 10*f_dust = 7.1 g/cm^2 at 1 AU
PS::F64 SolidDisk::eta_ice = 30./7.1; // enhancement beyond the ice line
PS::F64 SolidDisk::a_in = 0.98; // inner edge (AU -- TODO confirm units)
PS::F64 SolidDisk::a_out = 1.02; // outer edge
PS::F64 SolidDisk::a_ice = 2.0; // ice-line radius
PS::F64 SolidDisk::ecc_hill = 2.0; // rms eccentricity in Hill units
PS::F64 SolidDisk::inc_hill = 1.0; // rms inclination in Hill units
// GasDisk: power-law gas disk applying aerodynamic drag to particles.
// The constructor precomputes coefficients in code units (solar mass, AU,
// yr/2pi -- see the CGS conversion constants below).
class GasDisk{
public:
static PS::F64 alpha_gas; // power-law index of the midplane gas density
static PS::F64 beta_gas; // index entering the pressure-support (eta) term
static PS::F64 f_gas; // gas density scaling factor
static PS::F64 tau_gas; // e-folding gas depletion time (0 = no decay)
static PS::F64 C_d; // drag coefficient
static PS::F64 mu; // mean molecular weight
PS::F64 coef_rho_gas; // gas density at 1 AU, in code units
PS::F64 coef_cs_vk; // (sound speed / Kepler velocity) at 1 AU
PS::F64 coef_acc_gd; // 0.5*C_d*pi, prefactor of the drag acceleration
GasDisk(){
const PS::F64 L_CGS = 14959787070000; // 1 AU in cm
const PS::F64 M_CGS = 1.9884e33; // solar mass in g
const PS::F64 T = 365.25*24.*60.*60./(2.*M_PI); // code time unit in s
// 1.4e-9 g/cm^3: midplane gas density at 1 AU, converted to code units
coef_rho_gas = 1.4e-9 * f_gas /M_CGS*L_CGS*L_CGS*L_CGS;
const PS::F64 k_B = 1.380649e-16 /(M_CGS*L_CGS*L_CGS)*T*T; // Boltzmann constant
const PS::F64 N_A = 6.022140857e23; // Avogadro's number
const PS::F64 m_H = 1./N_A /M_CGS; // hydrogen mass in code units
// isothermal sound speed for a 280 K (at 1 AU) temperature profile
PS::F64 coef_cs = sqrt(k_B * 280 / (mu * m_H));
PS::F64 coef_vk = sqrt(FPGrav::m_sun);
coef_cs_vk = coef_cs / coef_vk;
coef_acc_gd = 0.5*C_d*M_PI;
if ( PS::Comm::getRank() == 0 ) {
std::cout << "rho_gas at 1 AU = " << coef_rho_gas << std::endl
<< "cs/vk at 1 AU = " << coef_cs_vk << std::endl;
}
}
// Add the gas-drag acceleration -0.5*C_d*pi*r_p^2*rho_gas*|u|*u/m to each
// local particle, where u is the velocity relative to the sub-Keplerian
// gas. L scales the temperature profile (enters as L^(1/8) in cs/vk);
// when clear is true, acc_gd is reset before accumulation.
template <class Tpsys>
void calcGasDrag(Tpsys & pp,
PS::F64 time,
PS::F64 L=1.,
bool clear=true){
const PS::S32 n_loc = pp.getNumberOfParticleLocal();
#pragma omp parallel for
for(PS::S32 i=0; i<n_loc; i++){
// cylindrical radius in the disk midplane
PS::F64 r2 = pp[i].pos.x*pp[i].pos.x + pp[i].pos.y*pp[i].pos.y;
PS::F64 r_inv = 1./sqrt(r2);
PS::F64 r = r2 * r_inv;
PS::F64 rho_gas = coef_rho_gas * pow(r, -alpha_gas);
// exponential gas depletion with timescale tau_gas
if ( tau_gas != 0. ) rho_gas *= exp(-time / tau_gas);
PS::F64 cs_vk = coef_cs_vk * sqrt(sqrt(r)) * pow(L, 1./8.);
// unit vector along the local azimuthal (Keplerian) direction
PS::F64vec ev(-pp[i].pos.y*r_inv, pp[i].pos.x*r_inv, 0.0);
PS::F64vec vkep = sqrt(FPGrav::m_sun * r_inv) * ev;
// eta: fractional sub-Keplerian offset of the gas rotation due to
// the radial pressure gradient
PS::F64 eta = 0.5 * (alpha_gas + beta_gas) * cs_vk * cs_vk;
PS::F64vec vgas = (1.0 - eta)*vkep;
PS::F64vec u = pp[i].vel - vgas; // velocity relative to the gas
//PRL(eta);
//PS::F64 rplanet = cbrt(0.75*pp[i].mass/(M_PI*FPGrav::dens));
if (clear) pp[i].acc_gd = 0.;
if ( pp[i].mass != 0. ) {
//pp[i].acc_gd += -coef_acc_gd * rplanet * rplanet * rho_gas * sqrt(u*u) * u / pp[i].mass;
pp[i].acc_gd += -coef_acc_gd * pp[i].r_planet * pp[i].r_planet * rho_gas * sqrt(u*u) * u / pp[i].mass;
pp[i].acc += pp[i].acc_gd;
}
}
}
};
// Definitions of the GasDisk static members (power-law disk defaults).
PS::F64 GasDisk::alpha_gas = 11./4.; // midplane gas-density power-law index
PS::F64 GasDisk::beta_gas = 0.5; // index entering the eta (pressure) term
PS::F64 GasDisk::f_gas = 0.71; // gas density scaling factor
PS::F64 GasDisk::tau_gas = 1.e6*2.*M_PI; // depletion time (1e6 code orbits)
PS::F64 GasDisk::C_d = 1.; // drag coefficient
PS::F64 GasDisk::mu = 2.34; // mean molecular weight
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (1024*3)
#define M (65)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
double A[M][N], B[M][N], C[N], D[N], E[N];
double S[M];
double p[2];
/*
 * Offloading tests for "omp target parallel" regions containing nested
 * "parallel for" constructs: proc_bind, data-sharing clauses (private,
 * firstprivate, lastprivate, shared), collapse, ordered, and coalesced
 * scheduling on the device.
 *
 * Fix: every per-trial thread count used to be computed as
 *   int t = (t < 32) ? 1 : tt;
 * which reads the uninitialized variable being declared -- undefined
 * behavior (C11 6.3.2.1p2).  The intended condition tests the loop
 * counter: 1 thread for trials 1..31, tt threads for trials 32..64.
 */
int main(void) {
  check_offloading();
  INIT();

  /* Record whether target regions fall back to the host. */
  int cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }

  //
  // Test: proc_bind clause
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(master)
#include "defines.h"
  for (int tt = 1; tt <= 64; tt++) {
    int t = (tt < 32) ? 1 : tt; /* was "(t < 32)": read of uninitialized t (UB) */
    int threads[1]; threads[0] = t-1;
    NESTED_PARALLEL_FOR(
      int tid = omp_get_thread_num(); \
      S[tid] = 0; \
      for (int i = 0; i < N; i++) { \
        A[tid][i] = B[tid][i] = 0; \
      },
      for (int i = 0; i < N; i++) { \
        A[tid][i] += C[i] + D[i]; \
        B[tid][i] += D[i] + E[i]; \
      },
      {
        double tmp = 0;
        for (int i = 0; i < N; i++) {
          tmp += A[tid][i] + B[tid][i];
        }
        S[tid] += tmp;
      },
      VERIFY(0, t, S[i], (double) SUMS * (N/2*(N+1))))
  }

#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(close)
#include "defines.h"
  for (int tt = 1; tt <= 64; tt++) {
    int t = (tt < 32) ? 1 : tt; /* was "(t < 32)": read of uninitialized t (UB) */
    int threads[1]; threads[0] = t-1;
    NESTED_PARALLEL_FOR(
      int tid = omp_get_thread_num(); \
      S[tid] = 0; \
      for (int i = 0; i < N; i++) { \
        A[tid][i] = B[tid][i] = 0; \
      },
      for (int i = 0; i < N; i++) { \
        A[tid][i] += C[i] + D[i]; \
        B[tid][i] += D[i] + E[i]; \
      },
      {
        double tmp = 0;
        for (int i = 0; i < N; i++) {
          tmp += A[tid][i] + B[tid][i];
        }
        S[tid] += tmp;
      },
      VERIFY(0, t, S[i], (double) SUMS * (N/2*(N+1))))
  }

#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(spread)
#include "defines.h"
  for (int tt = 1; tt <= 64; tt++) {
    int t = (tt < 32) ? 1 : tt; /* was "(t < 32)": read of uninitialized t (UB) */
    int threads[1]; threads[0] = t-1;
    NESTED_PARALLEL_FOR(
      int tid = omp_get_thread_num(); \
      S[tid] = 0; \
      for (int i = 0; i < N; i++) { \
        A[tid][i] = B[tid][i] = 0; \
      },
      for (int i = 0; i < N; i++) { \
        A[tid][i] += C[i] + D[i]; \
        B[tid][i] += D[i] + E[i]; \
      },
      {
        double tmp = 0;
        for (int i = 0; i < N; i++) {
          tmp += A[tid][i] + B[tid][i];
        }
        S[tid] += tmp;
      },
      VERIFY(0, t, S[i], (double) SUMS * (N/2*(N+1))))
  }

  //
  // Test: private, shared clauses on omp target parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
#include "defines.h"
  for (int tt = 1; tt <= 64; tt++) {
    int t = (tt < 32) ? 1 : tt; /* was "(t < 32)": read of uninitialized t (UB) */
    int threads[1]; threads[0] = t;
    NESTED_PARALLEL_FOR(
      double p = 2; \
      double q = 4; \
      int tid = omp_get_thread_num(); \
      S[tid] = 0; \
      for (int i = 0; i < N; i++) { \
        A[tid][i] = B[tid][i] = 0; \
      },
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[tid][i] += p; \
        B[tid][i] += q; \
      }
      ,
      {
        /* p and q are private in the inner region, so the originals (2+4=6)
           survive and show up in the expected sum below. */
        double tmp = p + q;
        for (int i = 0; i < N; i++) {
          tmp += A[tid][i] + B[tid][i];
        }
        S[tid] += tmp;
      },
      VERIFY(0, t, S[i], (double) 6 + SUMS * (N/2*(N+1))))
  }

  //
  // Test: firstprivate clause on omp target parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p,q)
#include "defines.h"
  for (int tt = 1; tt <= 64; tt++) {
    int t = (tt < 32) ? 1 : tt; /* was "(t < 32)": read of uninitialized t (UB) */
    int threads[1]; threads[0] = t;
    NESTED_PARALLEL_FOR(
      double p = -4; \
      double q = 4; \
      int tid = omp_get_thread_num(); \
      S[tid] = 0; \
      for (int i = 0; i < N; i++) { \
        A[tid][i] = B[tid][i] = 0; \
      },
      for (int i = 0; i < N; i++) { \
        A[tid][i] += C[i] + D[i] + p; \
        B[tid][i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      }
      ,
      {
        /* Updates to the firstprivate copies are discarded: p + q == 0. */
        double tmp = p + q;
        for (int i = 0; i < N; i++) {
          tmp += A[tid][i] + B[tid][i];
        }
        S[tid] += tmp;
      },
      VERIFY(0, t, S[i], (double) SUMS * (N/2*(N+1))))
  }

  //
  // Test: lastprivate clause on omp target parallel for with nested parallel.
  //
  for (int tt = 1; tt <= 64; tt++) {
    int t = (tt < 32) ? 1 : tt; /* was "(t < 32)": read of uninitialized t (UB) */
    int threads[1]; threads[0] = t;
    TESTD("omp target parallel num_threads(t)", {
      double q0[1];
      double q1[1];
      double q2[1];
      double q3[1];
      int tid = omp_get_thread_num();
      S[tid] = 0;
      for (int i = 0; i < N; i++) {
        A[tid][i] = B[tid][i] = 0;
      }
      _Pragma("omp parallel for lastprivate(q0) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q0[0] = C[i] + D[i];
        A[tid][i] += q0[0];
      }
      _Pragma("omp parallel for schedule(auto) lastprivate(q1) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q1[0] = C[i] + D[i];
        A[tid][i] += q1[0];
      }
      _Pragma("omp parallel for schedule(static) lastprivate(q2) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q2[0] = D[i] + E[i];
        B[tid][i] += q2[0];
      }
      _Pragma("omp parallel for schedule(static,9) lastprivate(q3) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q3[0] = D[i] + E[i];
        B[tid][i] += q3[0];
      }
      /* lastprivate: each qK holds the value from the final iteration. */
      double tmp = q0[0] + q1[0] + q2[0] + q3[0];
      for (int i = 0; i < N; i++) {
        tmp += A[tid][i] + B[tid][i];
      }
      S[tid] += tmp;
    }, VERIFY(0, t, S[i], (double) 2 * (N + (N/2*(N+1))) ));
  }

  //
  // Test: private clause on omp target parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p)
#include "defines.h"
  for (int tt = 1; tt <= 64; tt++) {
    int t = (tt < 32) ? 1 : tt; /* was "(t < 32)": read of uninitialized t (UB) */
    int threads[1]; threads[0] = t;
    NESTED_PARALLEL_FOR(
      double p[2]; \
      p[0] = 2; p[1] = 4; \
      int tid = omp_get_thread_num(); \
      S[tid] = 0; \
      for (int i = 0; i < N; i++) { \
        A[tid][i] = B[tid][i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p[0] = C[i] + D[i]; \
        p[1] = D[i] + E[i]; \
        A[tid][i] += p[0]; \
        B[tid][i] += p[1]; \
      }
      ,
      {
        double tmp = p[0] + p[1];
        for (int i = 0; i < N; i++) {
          tmp += A[tid][i] + B[tid][i];
        }
        S[tid] += tmp;
      },
      VERIFY(0, t, S[i], (double) 6 + SUMS * (N/2*(N+1))))
  }

  //
  // Test: firstprivate clause on omp target parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p)
#include "defines.h"
  for (int tt = 1; tt <= 64; tt++) {
    int t = (tt < 32) ? 1 : tt; /* was "(t < 32)": read of uninitialized t (UB) */
    int threads[1]; threads[0] = t;
    NESTED_PARALLEL_FOR(
      double p[2]; \
      p[0] = -4; p[1] = 4; \
      int tid = omp_get_thread_num(); \
      S[tid] = 0; \
      for (int i = 0; i < N; i++) { \
        A[tid][i] = B[tid][i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[tid][i] += C[i] + D[i] + p[0]; \
        B[tid][i] += D[i] + E[i] + p[1]; \
        if (i == N-1) { \
          p[0] += 6; \
          p[1] += 9; \
        } \
      }
      ,
      {
        double tmp = p[0] + p[1];
        for (int i = 0; i < N; i++) {
          tmp += A[tid][i] + B[tid][i];
        }
        S[tid] += tmp;
      },
      VERIFY(0, t, S[i], (double) SUMS * (N/2*(N+1))))
  }

  //
  // Test: collapse clause on omp target parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES collapse(2)
#include "defines.h"
  for (int tt = 1; tt <= 64; tt++) {
    int t = (tt < 32) ? 1 : tt; /* was "(t < 32)": read of uninitialized t (UB) */
    int threads[1]; threads[0] = t;
    NESTED_PARALLEL_FOR(
      int tid = omp_get_thread_num(); \
      S[tid] = 0; \
      for (int i = 0; i < N; i++) { \
        A[tid][i] = B[tid][i] = 0; \
      }
      ,
      /* 1024 x 3 == N; the pair of loops is collapsed into one space. */
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[tid][i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[tid][i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      {
        double tmp = 0;
        for (int i = 0; i < N; i++) {
          tmp += A[tid][i] + B[tid][i];
        }
        S[tid] += tmp;
      },
      VERIFY(0, t, S[i], (double) SUMS * (N/2*(N+1))))
  }

  //
  // Test: ordered clause on omp target parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES ordered
#include "defines.h"
  /* Step of 64 means this intentionally runs a single trial (t == 1). */
  for (int t = 1; t <= 64; t += 64) {
    int threads[1]; threads[0] = t;
    NESTED_PARALLEL_FOR(
      int tid = omp_get_thread_num(); \
      S[tid] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[tid] += C[i] + D[i]; \
      }
      ,
      {
      },
      VERIFY(0, t, S[i], (double) SUMS * (N/2*(N+1))))
  }

  //
  // Test: Ensure coalesced scheduling on GPU.
  //
  if (!cpuExec) {
    TESTD("omp target parallel num_threads(32)", {
      int tid = omp_get_thread_num();
      S[tid] = 0;
      for (int i = 0; i < 96; i++) {
        A[tid][i] = 0;
      }
      /* With coalesced (static,1-like) dispatch, iteration i lands on
         thread i % 32, so i - omp_get_thread_num() is a multiple of 32. */
      _Pragma("omp parallel for num_threads(32)")
      for (int i = 0; i < 96; i++) {
        A[tid][i] += i - omp_get_thread_num();
      }
      _Pragma("omp parallel for schedule(auto) num_threads(32)")
      for (int i = 0; i < 96; i++) {
        A[tid][i] += i - omp_get_thread_num();
      }
      _Pragma("omp parallel for schedule(static,1) num_threads(32)")
      for (int i = 0; i < 96; i++) {
        A[tid][i] += i - omp_get_thread_num();
      }
      double tmp = 0;
      for (int i = 0; i < 96; i++) {
        tmp += A[tid][i];
      }
      S[tid] = tmp;
    }, VERIFY(0, 32, S[i], (double) 3 * 95 * 48 ));
  } else {
    DUMP_SUCCESS(1);
  }

  return 0;
}
|
BF_std.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 1996-2001,2008,2010,2011 by Solar Designer
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* A public domain version of this code, with reentrant and crypt(3)
* interfaces added, but optimizations specific to password cracking
* removed, is available at:
*
* http://www.openwall.com/crypt/
*
* This implementation is compatible with OpenBSD bcrypt.c (version 2a)
* by Niels Provos <provos at citi.umich.edu>, and uses some of his
* ideas. The password hashing algorithm was designed by David Mazieres
* <dm at lcs.mit.edu>.
*
* There's a paper on the algorithm that explains its design decisions:
*
* http://www.usenix.org/events/usenix99/provos.html
*
* Some of the tricks in BF_ROUND might be inspired by Eric Young's
* Blowfish library (I can't be sure if I would think of something if I
* hadn't seen his code).
*/
#include <stdlib.h>
#include <string.h>
#include "arch.h"
#include "common.h"
#include "BF_std.h"
BF_binary BF_out[BF_N];
/* Number of Blowfish rounds, this is also hardcoded into a few places */
#define BF_ROUNDS 16
typedef BF_word BF_key[BF_ROUNDS + 2];
struct BF_ctx {
BF_word S[4][0x100];
BF_key P;
};
#if BF_N > 1
#define INDICES [BF_N]
#define INDEX [index]
#define INDEX0 [index]
#define for_each_index() \
for (index = 0; index < BF_N; index++)
#else
#define INDICES
#define INDEX
#define INDEX0 [0]
#define for_each_index()
#endif
#if BF_X2
#if BF_mt > 1
#define INDEX2 [index & 1]
#else
#define INDEX2 [index]
#endif
#else
#define INDEX2
#endif
#if BF_mt > 1
#if BF_X2
#define for_each_t() \
for (t = 0; t < n; t += 2)
#define for_each_ti() \
for (index = t; index <= t + 1; index++)
#else
#define for_each_t() \
for (t = 0; t < n; t++)
#define for_each_ti() \
index = t;
#endif
#else
#define for_each_t()
#define for_each_ti() \
for_each_index()
#endif
#if BF_mt == 1
/* Current Blowfish context */
#if BF_ASM
extern
#else
static
#endif
struct BF_ctx CC_CACHE_ALIGN BF_current INDICES;
#endif
/* Current Blowfish key */
static BF_key CC_CACHE_ALIGN BF_exp_key INDICES;
#if defined(__linux__) && defined(__sparc__)
static BF_key BF_init_key INDICES;
#else
static BF_key CC_CACHE_ALIGN BF_init_key INDICES;
#endif
/*
* Magic IV for 64 Blowfish encryptions that we do at the end.
* The string is "OrpheanBeholderScryDoubt" on big-endian.
*/
static BF_word BF_magic_w[6] = {
0x4F727068, 0x65616E42, 0x65686F6C,
0x64657253, 0x63727944, 0x6F756274
};
/*
* P-box and S-box tables initialized with digits of Pi.
*/
static struct BF_ctx BF_init_state = {
{
{
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7,
0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99,
0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e,
0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee,
0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef,
0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e,
0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440,
0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce,
0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e,
0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677,
0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032,
0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88,
0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e,
0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0,
0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98,
0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88,
0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6,
0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d,
0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7,
0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba,
0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f,
0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09,
0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb,
0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279,
0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab,
0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82,
0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573,
0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0,
0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790,
0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8,
0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0,
0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7,
0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad,
0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1,
0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9,
0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477,
0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49,
0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af,
0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5,
0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41,
0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400,
0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915,
0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a
}, {
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623,
0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266,
0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e,
0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6,
0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e,
0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1,
0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8,
0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff,
0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701,
0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7,
0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331,
0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf,
0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e,
0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87,
0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2,
0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16,
0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b,
0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509,
0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3,
0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f,
0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4,
0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960,
0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28,
0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802,
0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510,
0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf,
0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e,
0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50,
0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8,
0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281,
0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696,
0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128,
0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0,
0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0,
0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250,
0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3,
0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00,
0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061,
0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e,
0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735,
0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9,
0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340,
0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7
}, {
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934,
0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068,
0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840,
0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45,
0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a,
0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb,
0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6,
0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42,
0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2,
0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb,
0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b,
0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33,
0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3,
0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc,
0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564,
0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b,
0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922,
0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728,
0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e,
0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37,
0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804,
0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b,
0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb,
0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d,
0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350,
0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9,
0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe,
0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d,
0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f,
0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61,
0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9,
0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2,
0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e,
0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633,
0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169,
0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52,
0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5,
0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62,
0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76,
0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24,
0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4,
0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c,
0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0
}, {
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b,
0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe,
0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4,
0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8,
0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304,
0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22,
0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6,
0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9,
0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593,
0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51,
0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c,
0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b,
0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c,
0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd,
0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319,
0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb,
0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991,
0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32,
0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166,
0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae,
0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5,
0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47,
0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d,
0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84,
0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8,
0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd,
0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7,
0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38,
0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c,
0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525,
0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442,
0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964,
0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8,
0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d,
0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299,
0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02,
0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614,
0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a,
0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b,
0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0,
0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e,
0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9,
0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6
}
}, {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
0x9216d5d9, 0x8979fb1b
}
};
/*
* Same charset, different order -- can't use the common.c table here.
*/
/* Maps ASCII 0..0x7F to 6-bit digit values of bcrypt's "./A-Za-z0-9"
   alphabet ('.'->0, '/'->1, 'A'..'Z'->2..27, 'a'..'z'->28..53,
   '0'..'9'->54..63); 64 marks characters outside the alphabet. */
unsigned char BF_atoi64[0x80] = {
	64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 0, 1,
	54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 64, 64, 64, 64, 64,
	64, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 64, 64, 64, 64, 64,
	64, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
	43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 64, 64, 64, 64, 64
};
#if ARCH_LITTLE_ENDIAN
/*
 * Byte-reverses "count" 32-bit words in place (endianness conversion on
 * little-endian hosts; compiled out otherwise).  count must be >= 1.
 */
static void BF_swap(BF_word *x, int count)
{
	do {
		BF_word w = *x;
		*x++ = ((w & 0xFF) << 24) | ((w & 0xFF00) << 8) |
		    ((w >> 8) & 0xFF00) | (w >> 24);
	} while (--count);
}
#else
#define BF_swap(x, count)
#endif
#if BF_SCALE
/* Architectures that can shift addresses left by 2 bits with no extra cost */
#define BF_ROUND(ctx, L, R, N, tmp1, tmp2, tmp3, tmp4) \
tmp1 = L & 0xFF; \
tmp2 = L >> 8; \
tmp2 &= 0xFF; \
tmp3 = L >> 16; \
tmp3 &= 0xFF; \
tmp4 = L >> 24; \
tmp1 = ctx.S[3][tmp1]; \
tmp2 = ctx.S[2][tmp2]; \
tmp3 = ctx.S[1][tmp3]; \
tmp3 += ctx.S[0][tmp4]; \
tmp3 ^= tmp2; \
R ^= ctx.P[N + 1]; \
tmp3 += tmp1; \
R ^= tmp3;
#else
/* Architectures with no complicated addressing modes supported */
#define BF_INDEX(S, i) \
(*((BF_word *)(((unsigned char *)S) + (i))))
#define BF_ROUND(ctx, L, R, N, tmp1, tmp2, tmp3, tmp4) \
tmp1 = L & 0xFF; \
tmp1 <<= 2; \
tmp2 = L >> 6; \
tmp2 &= 0x3FC; \
tmp3 = L >> 14; \
tmp3 &= 0x3FC; \
tmp4 = L >> 22; \
tmp4 &= 0x3FC; \
tmp1 = BF_INDEX(ctx.S[3], tmp1); \
tmp2 = BF_INDEX(ctx.S[2], tmp2); \
tmp3 = BF_INDEX(ctx.S[1], tmp3); \
tmp3 += BF_INDEX(ctx.S[0], tmp4); \
tmp3 ^= tmp2; \
R ^= ctx.P[N + 1]; \
tmp3 += tmp1; \
R ^= tmp3;
#endif
/*
* Encrypt one block, BF_ROUNDS is hardcoded here.
*/
#define BF_ENCRYPT(ctx, L, R) \
L ^= ctx.P[0]; \
BF_ROUND(ctx, L, R, 0, u1, u2, u3, u4); \
BF_ROUND(ctx, R, L, 1, u1, u2, u3, u4); \
BF_ROUND(ctx, L, R, 2, u1, u2, u3, u4); \
BF_ROUND(ctx, R, L, 3, u1, u2, u3, u4); \
BF_ROUND(ctx, L, R, 4, u1, u2, u3, u4); \
BF_ROUND(ctx, R, L, 5, u1, u2, u3, u4); \
BF_ROUND(ctx, L, R, 6, u1, u2, u3, u4); \
BF_ROUND(ctx, R, L, 7, u1, u2, u3, u4); \
BF_ROUND(ctx, L, R, 8, u1, u2, u3, u4); \
BF_ROUND(ctx, R, L, 9, u1, u2, u3, u4); \
BF_ROUND(ctx, L, R, 10, u1, u2, u3, u4); \
BF_ROUND(ctx, R, L, 11, u1, u2, u3, u4); \
BF_ROUND(ctx, L, R, 12, u1, u2, u3, u4); \
BF_ROUND(ctx, R, L, 13, u1, u2, u3, u4); \
BF_ROUND(ctx, L, R, 14, u1, u2, u3, u4); \
BF_ROUND(ctx, R, L, 15, u1, u2, u3, u4); \
u4 = R; \
R = L; \
L = u4 ^ ctx.P[BF_ROUNDS + 1];
#if BF_ASM
extern void (*BF_body)(void);
#else
#if BF_X2
/*
* Encrypt two blocks in parallel, BF_ROUNDS is hardcoded here.
*/
#define BF_ENCRYPT2 \
L0 ^= BF_current[0].P[0]; \
L1 ^= BF_current[1].P[0]; \
BF_ROUND(BF_current[0], L0, R0, 0, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], L1, R1, 0, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], R0, L0, 1, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], R1, L1, 1, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], L0, R0, 2, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], L1, R1, 2, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], R0, L0, 3, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], R1, L1, 3, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], L0, R0, 4, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], L1, R1, 4, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], R0, L0, 5, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], R1, L1, 5, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], L0, R0, 6, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], L1, R1, 6, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], R0, L0, 7, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], R1, L1, 7, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], L0, R0, 8, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], L1, R1, 8, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], R0, L0, 9, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], R1, L1, 9, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], L0, R0, 10, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], L1, R1, 10, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], R0, L0, 11, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], R1, L1, 11, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], L0, R0, 12, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], L1, R1, 12, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], R0, L0, 13, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], R1, L1, 13, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], L0, R0, 14, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], L1, R1, 14, v1, v2, v3, v4); \
BF_ROUND(BF_current[0], R0, L0, 15, u1, u2, u3, u4); \
BF_ROUND(BF_current[1], R1, L1, 15, v1, v2, v3, v4); \
u4 = R0; \
v4 = R1; \
R0 = L0; \
R1 = L1; \
L0 = u4 ^ BF_current[0].P[BF_ROUNDS + 1]; \
L1 = v4 ^ BF_current[1].P[BF_ROUNDS + 1];
#define BF_body() \
L0 = R0 = L1 = R1 = 0; \
ptr = BF_current[0].P; \
do { \
BF_ENCRYPT2; \
*ptr = L0; \
*(ptr + 1) = R0; \
*(ptr + (BF_current[1].P - BF_current[0].P)) = L1; \
*(ptr + (BF_current[1].P - BF_current[0].P) + 1) = R1; \
ptr += 2; \
} while (ptr < &BF_current[0].P[BF_ROUNDS + 2]); \
\
ptr = BF_current[0].S[0]; \
do { \
ptr += 2; \
BF_ENCRYPT2; \
*(ptr - 2) = L0; \
*(ptr - 1) = R0; \
*(ptr - 2 + (BF_current[1].S[0] - BF_current[0].S[0])) = L1; \
*(ptr - 1 + (BF_current[1].S[0] - BF_current[0].S[0])) = R1; \
} while (ptr < &BF_current[0].S[3][0xFF]);
#else
#define BF_body() \
L0 = R0 = 0; \
ptr = BF_current.P; \
do { \
BF_ENCRYPT(BF_current, L0, R0); \
*ptr = L0; \
*(ptr + 1) = R0; \
ptr += 2; \
} while (ptr < &BF_current.P[BF_ROUNDS + 2]); \
\
ptr = BF_current.S[0]; \
do { \
ptr += 2; \
BF_ENCRYPT(BF_current, L0, R0); \
*(ptr - 2) = L0; \
*(ptr - 1) = R0; \
} while (ptr < &BF_current.S[3][0xFF]);
#endif
#endif
/*
 * Expands the NUL-terminated candidate "key" for slot "index": fills
 * BF_exp_key with BF_ROUNDS+2 big-endian 32-bit words of the cyclically
 * repeated key (the terminating NUL is included, then reading wraps to
 * the start), and BF_init_key with those words pre-XORed into the
 * initial P-box so BF_std_crypt() can memcpy it directly.
 *
 * If sign_extension_bug is set, bytes are sign-extended before being
 * OR-ed in, so a byte >= 0x80 corrupts the previously accumulated bytes
 * of tmp -- deliberately bug-compatible with the original crypt_blowfish
 * "$2x$" behavior.
 */
void BF_std_set_key(char *key, int index, int sign_extension_bug)
{
	char *ptr = key;
	int i, j;
	BF_word tmp;
	for (i = 0; i < BF_ROUNDS + 2; i++) {
		tmp = 0;
		for (j = 0; j < 4; j++) {
			tmp <<= 8;
			if (sign_extension_bug)
				tmp |= (int)(signed char)*ptr; /* high bits of tmp get set too */
			else
				tmp |= (unsigned char)*ptr;
			/* wrap to the key start after consuming the NUL */
			if (!*ptr) ptr = key; else ptr++;
		}
		BF_exp_key INDEX[i] = tmp;
		BF_init_key INDEX[i] = BF_init_state.P[i] ^ tmp;
	}
}
/*
 * Computes bcrypt ("eksblowfish") for the candidate keys previously
 * loaded with BF_std_set_key(), against *salt; "n" is the number of
 * candidates (used only when BF_mt > 1, where the outer loop is
 * parallelized with OpenMP).
 *
 * Phases per candidate:
 *   1. Initialize Blowfish state from BF_init_state / BF_init_key and
 *      mix the salt into P and S (the non-standard, salted key schedule).
 *   2. 2^salt->rounds times: XOR in the expanded key and rekey
 *      (BF_body), then XOR in the salt and rekey again.
 *   3. Encrypt the magic IV 64 times to produce the binary result in
 *      BF_out.  With BF_mt == 1 only words 0..1 are produced here
 *      (BF_std_crypt_exact() finishes 2..5 on demand); with BF_mt > 1
 *      all 6 words are produced, with the last byte zeroed to stay
 *      bug-compatible with the original implementation.
 */
void BF_std_crypt(BF_salt *salt, int n)
{
#if BF_mt > 1
	int t;
#endif
#if BF_mt > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(t) shared(n, BF_init_state, BF_init_key, BF_exp_key, salt, BF_magic_w, BF_out)
#endif
	for_each_t() {
#if BF_mt > 1
#if BF_X2
		struct BF_ctx BF_current[2];
#else
		struct BF_ctx BF_current;
#endif
#endif
		BF_word L0, R0;
		BF_word u1, u2, u3, u4; /* scratch for BF_ENCRYPT/BF_ROUND */
#if BF_X2
		BF_word L1, R1;
		BF_word v1, v2, v3, v4;
#endif
		BF_word *ptr;
		BF_word count;
#if BF_N > 1
		int index;
#endif
		/* Phase 1: salted key schedule. */
		for_each_ti() {
			int i;
			memcpy(BF_current INDEX2.S,
			    BF_init_state.S, sizeof(BF_current INDEX2.S));
			memcpy(BF_current INDEX2.P,
			    BF_init_key INDEX, sizeof(BF_current INDEX2.P));
			/* Replace P with encryptions of alternating salt halves. */
			L0 = R0 = 0;
			for (i = 0; i < BF_ROUNDS + 2; i += 2) {
				L0 ^= salt->salt[i & 2];
				R0 ^= salt->salt[(i & 2) + 1];
				BF_ENCRYPT(BF_current INDEX2, L0, R0);
				BF_current INDEX2.P[i] = L0;
				BF_current INDEX2.P[i + 1] = R0;
			}
			/* Replace all four S-boxes the same way (salt indices
			   continue cycling: 18&3=2, 19&3=3, 20&3=0, 21&3=1). */
			ptr = BF_current INDEX2.S[0];
			do {
				ptr += 4;
				L0 ^= salt->salt[(BF_ROUNDS + 2) & 3];
				R0 ^= salt->salt[(BF_ROUNDS + 3) & 3];
				BF_ENCRYPT(BF_current INDEX2, L0, R0);
				*(ptr - 4) = L0;
				*(ptr - 3) = R0;
				L0 ^= salt->salt[(BF_ROUNDS + 4) & 3];
				R0 ^= salt->salt[(BF_ROUNDS + 5) & 3];
				BF_ENCRYPT(BF_current INDEX2, L0, R0);
				*(ptr - 2) = L0;
				*(ptr - 1) = R0;
			} while (ptr < &BF_current INDEX2.S[3][0xFF]);
		}
		/* Phase 2: the expensive 2^rounds loop. */
		count = 1 << salt->rounds;
		do {
			for_each_ti() {
				BF_current INDEX2.P[0] ^= BF_exp_key INDEX[0];
				BF_current INDEX2.P[1] ^= BF_exp_key INDEX[1];
				BF_current INDEX2.P[2] ^= BF_exp_key INDEX[2];
				BF_current INDEX2.P[3] ^= BF_exp_key INDEX[3];
				BF_current INDEX2.P[4] ^= BF_exp_key INDEX[4];
				BF_current INDEX2.P[5] ^= BF_exp_key INDEX[5];
				BF_current INDEX2.P[6] ^= BF_exp_key INDEX[6];
				BF_current INDEX2.P[7] ^= BF_exp_key INDEX[7];
				BF_current INDEX2.P[8] ^= BF_exp_key INDEX[8];
				BF_current INDEX2.P[9] ^= BF_exp_key INDEX[9];
				BF_current INDEX2.P[10] ^= BF_exp_key INDEX[10];
				BF_current INDEX2.P[11] ^= BF_exp_key INDEX[11];
				BF_current INDEX2.P[12] ^= BF_exp_key INDEX[12];
				BF_current INDEX2.P[13] ^= BF_exp_key INDEX[13];
				BF_current INDEX2.P[14] ^= BF_exp_key INDEX[14];
				BF_current INDEX2.P[15] ^= BF_exp_key INDEX[15];
				BF_current INDEX2.P[16] ^= BF_exp_key INDEX[16];
				BF_current INDEX2.P[17] ^= BF_exp_key INDEX[17];
			}
			BF_body();
			/* Same again, but XORing in the salt (4 words, repeated). */
			u1 = salt->salt[0];
			u2 = salt->salt[1];
			u3 = salt->salt[2];
			u4 = salt->salt[3];
			for_each_ti() {
				BF_current INDEX2.P[0] ^= u1;
				BF_current INDEX2.P[1] ^= u2;
				BF_current INDEX2.P[2] ^= u3;
				BF_current INDEX2.P[3] ^= u4;
				BF_current INDEX2.P[4] ^= u1;
				BF_current INDEX2.P[5] ^= u2;
				BF_current INDEX2.P[6] ^= u3;
				BF_current INDEX2.P[7] ^= u4;
				BF_current INDEX2.P[8] ^= u1;
				BF_current INDEX2.P[9] ^= u2;
				BF_current INDEX2.P[10] ^= u3;
				BF_current INDEX2.P[11] ^= u4;
				BF_current INDEX2.P[12] ^= u1;
				BF_current INDEX2.P[13] ^= u2;
				BF_current INDEX2.P[14] ^= u3;
				BF_current INDEX2.P[15] ^= u4;
				BF_current INDEX2.P[16] ^= u1;
				BF_current INDEX2.P[17] ^= u2;
			}
			BF_body();
		} while (--count);
		/* Phase 3: produce the output from the magic IV. */
#if BF_mt == 1
		for_each_ti() {
			/* Only the first output block: enough for fast hash
			   comparison; BF_std_crypt_exact() computes the rest. */
			L0 = BF_magic_w[0];
			R0 = BF_magic_w[1];
			count = 64;
			do {
				BF_ENCRYPT(BF_current INDEX, L0, R0);
			} while (--count);
			BF_out INDEX0[0] = L0;
			BF_out INDEX0[1] = R0;
		}
#else
		for_each_ti() {
			BF_word L, R;
			BF_word u1, u2, u3, u4;
			BF_word count;
			int i;
			memcpy(&BF_out[index], &BF_magic_w,
			    sizeof(BF_out[index]));
			count = 64;
			do
			for (i = 0; i < 6; i += 2) {
				L = BF_out[index][i];
				R = BF_out[index][i + 1];
				BF_ENCRYPT(BF_current INDEX2, L, R);
				BF_out[index][i] = L;
				BF_out[index][i + 1] = R;
			} while (--count);
/* This has to be bug-compatible with the original implementation :-) */
			BF_out[index][5] &= ~(BF_word)0xFF;
		}
#endif
	}
}
#if BF_mt == 1
/*
 * Completes the bcrypt output for candidate "index": BF_std_crypt()
 * (BF_mt == 1 build) produced only BF_out[index][0..1]; this encrypts
 * the remaining two magic-IV blocks 64 times to fill words 2..5, using
 * the Blowfish state left in BF_current by BF_std_crypt().  Called only
 * when the fast partial comparison matched.
 */
void BF_std_crypt_exact(int index)
{
	BF_word L, R;
	BF_word u1, u2, u3, u4; /* scratch for BF_ENCRYPT */
	BF_word count;
	int i;
	memcpy(&BF_out[index][2], &BF_magic_w[2], sizeof(BF_word) * 4);
	count = 64;
	do
	for (i = 2; i < 6; i += 2) {
		L = BF_out[index][i];
		R = BF_out[index][i + 1];
		BF_ENCRYPT(BF_current INDEX, L, R);
		BF_out[index][i] = L;
		BF_out[index][i + 1] = R;
	} while (--count);
/* This has to be bug-compatible with the original implementation :-) */
	BF_out[index][5] &= ~(BF_word)0xFF;
}
#endif
/*
* I'm not doing any error checking in the routines below since the
* ciphertext should have already been checked to be fmt_BF.valid().
*/
/*
 * Decodes "size" bytes of bcrypt-style base-64 from "src" into "dst".
 * Every four input characters produce up to three output bytes; the
 * final group is cut short as soon as "size" bytes have been written.
 * No input validation happens here -- see the note above: the
 * ciphertext must already have passed fmt_BF.valid().
 */
static void BF_decode(BF_word *dst, char *src, int size)
{
	unsigned char *out = (unsigned char *)dst;
	unsigned char *limit = out + size;
	unsigned char *in = (unsigned char *)src;
	unsigned int b0, b1, b2, b3;

	do {
		/* First output byte: 6 bits of b0, top 2 bits of b1. */
		b0 = BF_atoi64[ARCH_INDEX(*in++)];
		b1 = BF_atoi64[ARCH_INDEX(*in++)];
		*out++ = (b0 << 2) | ((b1 & 0x30) >> 4);
		if (out >= limit)
			break;

		/* Second output byte: low 4 bits of b1, top 4 bits of b2. */
		b2 = BF_atoi64[ARCH_INDEX(*in++)];
		*out++ = ((b1 & 0x0F) << 4) | ((b2 & 0x3C) >> 2);
		if (out >= limit)
			break;

		/* Third output byte: low 2 bits of b2, all 6 bits of b3. */
		b3 = BF_atoi64[ARCH_INDEX(*in++)];
		*out++ = ((b2 & 0x03) << 6) | b3;
	} while (out < limit);
}
/*
 * Extracts the salt from a bcrypt ciphertext string ("$2x$NN$..."):
 * 16 salt bytes decoded from offset 7, the cost from offset 4, and the
 * subtype letter from offset 2 (with 'a' folded into 'y').
 * Returns a pointer to static storage -- valid only until the next
 * call, and not thread-safe.
 */
void *BF_std_get_salt(char *ciphertext)
{
	static BF_salt salt;

	BF_decode(salt.salt, ciphertext + 7, 16);
	BF_swap(salt.salt, 4);
	salt.rounds = atoi(ciphertext + 4);
	/* '$2a$' hashes are handled the same way as '$2y$' */
	salt.subtype = (ciphertext[2] == 'a') ? 'y' : ciphertext[2];

	return &salt;
}
/*
 * Extracts the 6-word binary hash from a bcrypt ciphertext string,
 * decoding 23 bytes starting at offset 29.  Word 5 is zeroed first so
 * its undecoded top byte is deterministic, and its low byte is cleared
 * afterwards to stay bug-compatible with the reference implementation.
 * Returns a pointer to static storage (not thread-safe).
 */
void *BF_std_get_binary(char *ciphertext)
{
	static BF_binary binary;

	binary[5] = 0;
	BF_decode(binary, ciphertext + 29, 23);
	BF_swap(binary, 6);
	binary[5] &= ~(BF_word)0xFF;

	return &binary;
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> CmNonstrictHandler;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII helper that tracks additions to the template parameter depth and
/// undoes all of them when it goes out of scope.
class TemplateParameterDepthRAII {
  unsigned &Depth;      // the parser's depth counter being adjusted
  unsigned AddedLevels; // total amount added; subtracted on destruction

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : Depth(Depth), AddedLevels(0) {}

  /// Restore the depth to its value at construction time.
  ~TemplateParameterDepthRAII() { Depth -= AddedLevels; }

  /// Enter one additional level of template parameters.
  void operator++() {
    ++Depth;
    ++AddedLevels;
  }

  /// Enter \p D additional levels at once.
  void addDepth(unsigned D) {
    Depth += D;
    AddedLevels += D;
  }

  unsigned getDepth() const { return Depth; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,

    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token.
    NoSpaceBeforeLess = 0x1,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };

  /// One recorded '<' candidate.
  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    // Delimiter depths at the time the '<' was seen; used to decide whether
    // the candidate is still live in the current bracket scope.
    unsigned short ParenCount, BracketCount, BraceCount;

    // True if the parser is at exactly the delimiter depth at which the '<'
    // was recorded.
    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }

    // True if the parser is at that depth or nested more deeply inside it.
    bool isActiveOrNested(Parser &P) const {
      return isActive(P) || P.ParenCount > ParenCount ||
             P.BracketCount > BracketCount || P.BraceCount > BraceCount;
    }
  };

  SmallVector<Loc, 8> Locs;

  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (!Locs.empty() && Locs.back().isActive(P)) {
      // Same bracket scope: keep the highest-priority candidate, preferring
      // the most recent one on a tie (note '<=').
      if (Locs.back().Priority <= Prio) {
        Locs.back().TemplateName = TemplateName;
        Locs.back().LessLoc = LessLoc;
        Locs.back().Priority = Prio;
      }
    } else {
      Locs.push_back({TemplateName, LessLoc, Prio,
                      P.ParenCount, P.BracketCount, P.BraceCount});
    }
  }

  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }

  /// Get the current enclosing expression that might have been intended to
  /// be a template name.
  Loc *getCurrent(Parser &P) {
    if (!Locs.empty() && Locs.back().isActive(P))
      return &Locs.back();
    return nullptr;
  }
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
public:
  Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
  ~Parser() override;

  // Trivial accessors for the parser's collaborators and current state.
  const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
  const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
  Preprocessor &getPreprocessor() const { return PP; }
  Sema &getActions() const { return Actions; }
  AttributeFactory &getAttrFactory() { return AttrFactory; }

  const Token &getCurToken() const { return Tok; }
  Scope *getCurScope() const { return Actions.getCurScope(); }
  // Forwarded to Sema; used when a scope is entered/exited on the MS ABI.
  void incrementMSManglingNumber() const {
    return Actions.incrementMSManglingNumber();
  }

  Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

  // Type forwarding. All of these are statically 'void*', but they may all be
  // different actual classes based on the actions in place.
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;

  typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

  typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
/// Convenience overload for callers that do not need the decl group result.
bool ParseTopLevelDecl() {
  DeclGroupPtrTy Result;
  return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Record the location before Lex overwrites Tok in place.
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current token only if it has the expected kind.
/// \returns true (and advances) on a match, false otherwise.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (!Tok.is(Expected))
    return false;
  // Delegate to the common path: asserts the token is not special, records
  // PrevTokLocation, and lexes the next token.
  ConsumeToken();
  return true;
}
/// Like TryConsumeToken(Expected), but additionally reports the location of
/// the consumed token through \p Loc on success.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  bool Matched = TryConsumeToken(Expected);
  if (Matched)
    Loc = PrevTokLocation; // location of the token we just consumed
  return Matched;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  // The token classes below are mutually exclusive, so the order of these
  // checks is only a dispatch, not a priority.
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    // By default a stray code-completion token is reported rather than
    // silently consumed.
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}
/// Return the source location just past the end of the previously
/// consumed token (computed from PrevTokLocation by the preprocessor).
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}

/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
  return Tok.is(tok::l_paren) || Tok.is(tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.is(tok::l_square) || Tok.is(tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
  return Tok.is(tok::l_brace) || Tok.is(tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;
  // Push 'Consumed' back into the stream, lex it so it becomes the current
  // token, then push the old current token back so it is seen again right
  // after 'Consumed'.
  PP.EnterToken(Consumed);
  PP.Lex(Tok);
  PP.EnterToken(Next);
}
/// Consume an annotation token.  Unlike the other consumers, this sets
/// PrevTokLocation to the annotation's *end* location while returning the
/// annotation's *start* location.
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeParen - Consume a '(' or ')' and keep ParenCount up-to-date.
/// Returns the location of the consumed token.
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.is(tok::l_paren)) {
    ++ParenCount;
  } else if (ParenCount != 0) {
    // Leaving a paren scope invalidates pending '<' template-name
    // candidates recorded at this depth.
    AngleBrackets.clear(*this);
    // Don't let unbalanced )'s drive the count negative.
    --ParenCount;
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBracket - Consume a '[' or ']' and keep BracketCount up-to-date.
/// Returns the location of the consumed token.
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.is(tok::l_square)) {
    ++BracketCount;
  } else if (BracketCount != 0) {
    // Leaving a bracket scope invalidates pending '<' template-name
    // candidates recorded at this depth.
    AngleBrackets.clear(*this);
    // Don't let unbalanced ]'s drive the count negative.
    --BracketCount;
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBrace - Consume a '{' or '}' and keep BraceCount up-to-date.
/// Returns the location of the consumed token.
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.is(tok::l_brace)) {
    ++BraceCount;
  } else if (BraceCount != 0) {
    // Leaving a brace scope invalidates pending '<' template-name
    // candidates recorded at this depth.
    AngleBrackets.clear(*this);
    // Don't let unbalanced }'s drive the count negative.
    --BraceCount;
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  // Same mechanics as ConsumeToken(), but guarded by the literal assert.
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma cm_nonstrict...
void HandlePragmaCmNonstrict();
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  // Once the stream hits eof, keep returning the eof token rather than
  // reading past it.
  if (N == 0 || Tok.is(tok::eof)) return Tok;
  return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
  return PP.LookAhead(0);
}
// Annotation tokens carry a single opaque pointer; the helpers below
// round-trip it with the proper static type.

/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}

private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
  Tok.setAnnotationValue(T.getAsOpaquePtr());
}

/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}

/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;

  // Fast inline check: only the cached identifiers 'vector', 'bool', and
  // (AltiVec only) 'pixel' can be context-sensitive keywords here.
  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;

  // Slow path does the actual replacement.
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  // Only relevant when AltiVec or ZVector is enabled.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);

  if (Tok.isAnnotation())
    return false;

  // Lazily cache the IdentifierInfo for "instancetype" on first use so the
  // comparison below is a cheap pointer check.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");

  return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  // Parser state snapshotted at construction; restored by Revert().
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;

public:
  explicit TentativeParsingAction(Parser& p) : P(p) {
    // Snapshot the current token and delimiter counts, then ask the
    // preprocessor to start caching tokens so we can backtrack.
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    // Keep the consumed tokens, but drop identifiers that were only
    // tentatively declared during this transaction.
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    // Rewind the token stream and restore every piece of saved state.
    P.PP.Backtrack();
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  // Unconditionally rewind; Revert() also satisfies the base destructor's
  // "Commit or Revert" assertion.
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC; // the ObjC container temporarily exited, or null if none
  // Marks P.ParsingInObjCContainer for the lifetime of this object.
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    // Re-enter the container we left in the constructor.
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// Consume any extra semi-colons until the end of the line.
/// NOTE(review): TST appears to identify the tag type when Kind is
/// InsideStruct -- confirm against the out-of-view definition.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - RAII helper that introduces a new scope for parsing. The
/// kind of scope is determined by ScopeFlags. Create an instance on the
/// stack where the parser enters the scope; the constructor pushes the
/// scope and the destructor (or an explicit Exit() call) pops it again.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // Manage a scope in the parser Self, created with the flags ScopeFlags --
  // except when we are about to enter a compound statement, in which case no
  // scope is pushed.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (!EnteredScope || BeforeCompoundStmt) {
      // No scope was entered; null out Self so Exit() is a no-op.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
      return;
    }
    Self->EnterScope(ScopeFlags);
  }

  // Exit - Pop the scope associated with this object now, rather than
  // waiting until the object is destroyed.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }

  ~ParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  // Scope whose flags are being modified.
  Scope *CurScope;
  // Previous flag value; presumably restored by the destructor -- the
  // constructor/destructor definitions are out of view here.
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.

public:
/// Emit a diagnostic with the given ID at the given source location.
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
/// Emit a diagnostic at the location of the given token.
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit a diagnostic at the current token (the member Tok).
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}

private:
/// Emit diagnostic DK with a fix-it suggesting parentheses around ParenRange.
/// NOTE(review): exact fix-it behavior lives in the out-of-view definition.
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

/// Combine two skip flags into a single value (bitwise OR).
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
/// Convenience overload: stop at either of two tokens.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
/// Convenience overload: stop at any of three tokens.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
/// Core implementation: stop when any token in Toks is found.
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // Hooks invoked after the top-level class is complete; subclasses override
  // the ones relevant to the kind of entity they hold. The base
  // implementations are defined out of line (out of view here).
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;

private:
  // Parser used when the delayed regions are reparsed.
  Parser *Self;
  // The nested class whose late-parsed members this node represents.
  ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;           // The attribute's lexed argument tokens.
  IdentifierInfo &AttrName;    // Name of the attribute.
  SourceLocation AttrNameLoc;  // Location of the attribute name.
  SmallVector<Decl*, 2> Decls; // Declarations this attribute applies to.

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  /// Record another declaration that this attribute should apply to.
  void addDecl(Decl *D) { Decls.push_back(D); }
};
/// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  /// Whether these attributes should be parsed shortly after creation
  /// rather than at the end of the class. Const-qualified: the accessor
  /// does not modify the list.
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;           // The method (or, when TemplateScope, template) decl.
  CachedTokens Toks; // Lexed tokens of the definition, replayed later.

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

  void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument. Owned by this object (unique_ptr).
  std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// Whether this member function had an associated template
  /// scope. When true, Method is a template declaration;
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

/// Return the innermost class currently being parsed; requires a non-empty
/// ClassStack (asserts otherwise).
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition. The
/// constructor pushes the class onto the parsing-class stack; the class is
/// popped either by an explicit Pop() call or, failing that, by the
/// destructor.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {}

  /// Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Ensure the class is popped exactly once.
    if (!Popped)
      Pop();
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Not parsing a template at all. All members are explicitly
  /// initialized; previously LastParameterListWasEmpty was left
  /// uninitialized here, which is undefined behavior when read.
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc(),
        LastParameterListWasEmpty(false) { }

  /// A template declaration or an explicit specialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// An explicit instantiation; ExternLoc is valid only for
  /// 'extern template' instantiations.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
// Late parsing of templated function definitions: lex the body now, parse
// it when required.
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

// Static callbacks taking an opaque Parser pointer P.
// NOTE(review): the registration site is not visible in this file section.
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);

Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);

/// Kind of initializer being cached for later parsing.
enum CachedInitKind {
  CIK_DefaultArgument,
  CIK_DefaultInitializer
};

NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                   ParsedAttributes &AccessAttrs,
                                   ParsingDeclarator &D,
                                   const ParsedTemplateInfo &TemplateInfo,
                                   const VirtSpecifiers &VS,
                                   SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);

// Reparse the token streams cached by the LateParsed* structures above.
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                             bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
                         bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);

/// Consume tokens into Toks until T1 is found; convenience form of the
/// two-token overload below with both stop tokens equal.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.

/// ParsedAttributes that additionally records the source range covered by
/// the attributes it holds.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

  /// Drop all attributes and reset the range to an invalid one.
  void clear() {
    Range = SourceRange();
    ParsedAttributes::clear();
  }

  SourceRange Range;
};
/// ParsedAttributesView that additionally records the source range covered
/// by the attributes.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
  ParsedAttributesViewWithRange() : ParsedAttributesView() {}

  /// Clear the view's list and reset the range to an invalid one.
  void clearListOnly() {
    ParsedAttributesView::clearListOnly();
    Range = SourceRange();
  }

  SourceRange Range;
};
// Top-level (external) declarations and function definitions.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                        ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
    ParsedAttributesWithRange &attrs,
    ParsingDeclSpec *DS = nullptr,
    AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                              ParsingDeclSpec &DS,
                                              AccessSpecifier AS);

void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
                              const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
                              LateParsedAttrList *LateParsedAttrs = nullptr);
// K&R-style (identifier-list) parameter declarations.
void ParseKNRParamDeclarations(Declarator &D);

// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();

// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                      ParsedAttributes &prefixAttrs);

class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
    ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
    SmallVectorImpl<IdentifierLocPair> &protocolIdents,
    SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
                                      BalancedDelimiterTracker &T,
                                      SmallVectorImpl<Decl *> &AllIvarDecls,
                                      bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                     tok::ObjCKeywordKind visibility,
                                     SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                 SmallVectorImpl<SourceLocation> &PLocs,
                                 bool WarnOnDeclarations,
                                 bool ForObjCContainer,
                                 SourceLocation &LAngleLoc,
                                 SourceLocation &EndProtoLoc,
                                 bool consumeLastToken);

/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken,
    bool warnOnIncompleteProtocols);

/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken);

/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                  ParsedType type,
                                                  bool consumeLastToken,
                                                  SourceLocation &endLoc);

void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                              ParsedAttributes &prefixAttrs);
/// RAII object tracking state while parsing an Objective-C @implementation.
/// On construction it installs itself as the parser's current implementation
/// context (P.CurParsedObjCImpl); collected method bodies are parsed later.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  // Initialize Finished in the member-init list (it was previously assigned
  // in the constructor body, leaving a window where it was uninitialized).
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished; // Set once finish() has completed.
};
// The @implementation currently being parsed, or null when outside one.
ObjCImplParsingDataRAII *CurParsedObjCImpl;

void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals
};
// Cached identifiers for the context-sensitive qualifiers above,
// indexed by ObjCTypeQual.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

bool isTokIdentifier_in() const;

ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                             ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
                          tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
                          bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.

/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0,
  MaybeTypeCast,
  IsTypeCast
};

ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
    TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                unsigned &NumLineToksConsumed,
                                bool IsUnevaluated);

private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

// Precedence-climbing parse of binary-operator chains starting from LHS.
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
                                      prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
                               bool isAddressOfOperand,
                               bool &NotCastExpr,
                               TypeCastState isTypeCast,
                               bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
                               bool isAddressOfOperand = false,
                               TypeCastState isTypeCast = NotTypeCast,
                               bool isVectorLiteral = false);

/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();

/// Returns true if the next token would start a postfix-expression
/// suffix: '[', '(', '.', '->', '++' or '--'.
bool isPostfixExpressionSuffixStart() {
  tok::TokenKind K = Tok.getKind();
  return (K == tok::l_square || K == tok::l_paren ||
          K == tok::period || K == tok::arrow ||
          K == tok::plusplus || K == tok::minusminus);
}

bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                         const Token &OpToken);
/// Convenience form: consults the innermost tracked angle bracket, if any.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  if (auto *Info = AngleBrackets.getCurrent(*this))
    return checkPotentialAngleBracketDelimiter(*Info, OpToken);
  return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();

// CM member-style expressions (select, merge, format, ...). NOTE(review):
// these appear to be vendor extensions for the C-for-Media dialect --
// semantics live in their out-of-view definitions.
ExprResult ParseCMMethodExpr(ExprResult LHS);
ExprResult ParseCMAll(ExprResult LHS);
ExprResult ParseCMAny(ExprResult LHS);
ExprResult ParseCMColumn(ExprResult LHS);
ExprResult ParseCMFormat(ExprResult LHS);
ExprResult ParseCMGenxSelect(ExprResult LHS);
ExprResult ParseCMIselect(ExprResult LHS);
ExprResult ParseCMMerge(ExprResult LHS);
ExprResult ParseCMNCols(ExprResult LHS);
ExprResult ParseCMNElems(ExprResult LHS);
ExprResult ParseCMNRows(ExprResult LHS);
ExprResult ParseCMReplicate(ExprResult LHS);
ExprResult ParseCMRow(ExprResult LHS);
ExprResult ParseCMSelect(ExprResult LHS);
ExprResult ParseCMSelectAll(ExprResult LHS);
bool isCMMethodIdentifier(const IdentifierInfo &Id);

ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                              bool &isCastExpr,
                                              ParsedType &CastTy,
                                              SourceRange &CastRange);

typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;

/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(
    SmallVectorImpl<Expr *> &Exprs,
    SmallVectorImpl<SourceLocation> &CommaLocs,
    llvm::function_ref<void()> Completer = llvm::function_ref<void()>());

/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                               SmallVectorImpl<SourceLocation> &CommaLocs);

/// ParseCMExpressionList - A simple comma-separated list of expressions in a
/// context where '>' is not considered to be an operator.
bool ParseCMExpressionList(SmallVectorImpl<Expr*> &Exprs,
                           SmallVectorImpl<SourceLocation> &CommaLocs);

/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
  SimpleExpr, // Only parse '(' expression ')'
  FoldExpr, // Also allow fold-expression <anything>
  CompoundStmt, // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
  CastExpr // Also allow '(' type-name ')' <anything>
};

ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                bool stopIfCastExpr,
                                bool isTypeCast,
                                ParsedType &CastTy,
                                SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
    ParenParseOption &ExprType, ParsedType &CastTy,
    BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                          SourceLocation LParenLoc,
                                          SourceLocation RParenLoc);

ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

ExprResult ParseGenericSelectionExpression();

ExprResult ParseObjCBoolLiteral();

ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                   Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

// True when there is no whitespace between the two tokens (e.g. '>>' vs
// '> >' disambiguation).
bool areTokensAdjacent(const Token &A, const Token &B);

void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                bool EnteringContext, IdentifierInfo &II,
                                CXXScopeSpec &SS);

bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
                                    ParsedType ObjectType,
                                    bool EnteringContext,
                                    bool *MayBePseudoDestructor = nullptr,
                                    bool IsTypename = false,
                                    IdentifierInfo **LastII = nullptr,
                                    bool OnlyNamespace = false);

//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
                                         bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
    LambdaIntroducer &Intro);

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();

//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();

//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                    tok::TokenKind OpKind,
                                    CXXScopeSpec &SS,
                                    ParsedType ObjectType);

//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();

//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();

ExceptionSpecificationType tryParseExceptionSpecification(
    bool Delayed,
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &DynamicExceptions,
    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
    ExprResult &NoexceptExpr,
    CachedTokens *&ExceptionSpecTokens);

// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &Exceptions,
    SmallVectorImpl<SourceRange> &Ranges);

//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
                                   bool MayBeFollowedByDirectInit);

//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();

//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                 Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
                                    SourceLocation Start);

//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                        SourceLocation Loc,
                                        Sema::ConditionKind CK,
                                        ForRangeInfo *FRI = nullptr);

//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.

/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
  // A '{' introduces a braced initializer; anything else must be an
  // assignment-expression.
  return Tok.isNot(tok::l_brace) ? ParseAssignmentExpression()
                                 : ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
/// The kinds of constructs permitted at a point where a statement is
/// being parsed (see ParseStatementOrDeclaration).
enum AllowedConstructsKind {
/// Allow any declarations, statements, OpenMP directives.
ACK_Any,
/// Allow only statements and non-standalone OpenMP directives.
ACK_StatementsOpenMPNonStandalone,
/// Allow statements and all executable OpenMP directives.
ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial __if_exists / __if_not_exists keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior that this __if_exists or __if_not_exists block
/// should have, as determined when the condition was parsed.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
// Deliberately no default case: -Wswitch will flag any DeclSpecContext
// enumerator added later that is not classified here.
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
// Exhaustive over DeclSpecContext; no default so new enumerators must be
// classified explicitly (caught by -Wswitch).
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
// Location of the ':' between declaration and range expression;
// invalid when no for-range form was seen.
SourceLocation ColonLoc;
// The range expression following the ':'.
ExprResult RangeExpr;
// True exactly when a ':' was parsed, i.e. this really was a
// for-range-declaration.
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
/// ForRangeInit extended with the already-formed loop-variable statement;
/// used when a condition turns out to be a for-range declaration (see
/// ParseCXXCondition's FRI parameter).
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
// C++ needs tentative parsing to rule out function-style casts; only a
// definitive TPResult::True counts as "known".
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  // C++ requires full tentative-parsing disambiguation; in C a leading
  // declaration specifier is decisive.
  return getLangOpts().CPlusPlus ? isCXXDeclarationStatement()
                                 : isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
// NOTE(review): presumably primes Sema's OpenMP loop-handling state
// before the init part is examined -- confirm against Sema.
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
/// \param isAmbiguous receives the C++ disambiguation's ambiguity flag;
/// always set to false in C, where no ambiguity arises.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool IsAmbiguous;
  return isTypeIdInParens(IsAmbiguous);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
// In C the ambiguity flag is irrelevant; a specifier-qualifier decides.
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that ignores the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool IsAmbiguous;
  return isCXXTypeId(Context, IsAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' can begin an attribute-specifier-seq, and only when standard
  // attributes are available in this language mode.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
// If we are positioned at what looks like a misplaced C++11 attribute
// ('[[' or 'alignas'), diagnose it with CorrectLocation as the place the
// attributes should have been written; otherwise do nothing.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
// Nothing to do unless [[]] attributes are enabled in this mode.
if (!standardAttributesAllowed())
return;
// Neither '[[' nor 'alignas' ahead: no misplaced attribute here.
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
// Diagnose and discard prohibited attributes; a no-op when the attribute
// source range is invalid (i.e. nothing was actually parsed).
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
// Overload for attribute views: diagnoses the same way but calls
// clearListOnly() rather than clear() (NOTE(review): presumably because
// the view does not own the attribute objects -- confirm).
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't support yet, for example,
// attributes that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
// If the next token is __attribute__, parse the GNU attribute list and
// attach the result to declarator D; otherwise do nothing.
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
// Overload that appends parsed GNU attributes directly into an existing
// attribute list instead of routing them through a Declarator.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
// If a C++11 attribute-specifier-seq starts here (and [[]] attributes are
// enabled), parse it and attach the attributes to declarator D.
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
// Overload collecting into a plain ParsedAttributes list; parses into a
// ranged temporary first because ParseCXX11Attributes requires one, then
// transfers ownership of the parsed attributes.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
// Overload for ranged attribute lists. OuterMightBeMessageSend is passed
// through to the attribute-specifier disambiguation (presumably so '[['
// is not confused with a bracketed Objective-C message send -- confirm).
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
// Under Microsoft extensions, a '[' may begin a Microsoft attribute
// block; parse it if so, otherwise do nothing.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
// If the __declspec keyword is enabled and is the current token, parse
// the declspec attribute list; otherwise do nothing.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens. Note that true is also returned when
/// the language is not OpenCL: having nothing to parse is not an error.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
void ParseCMTypeSpecifiers(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload of isCXX11VirtSpecifier that inspects the
/// current token.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
// True once Sema accepted the declarator scope; the destructor must then
// notify Sema of the exit.
bool EnteredScope;
// True once the parser-level scope was pushed; the destructor must then
// pop it.
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
// Set before EnterScope so the destructor balances the push even if
// Sema later rejects the declarator scope.
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
// A false return from ActOnCXXEnterDeclaratorScope means success here:
// only then is EnteredScope recorded and the exit call required.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
/// Bit flags describing which attribute syntaxes may be parsed (or must be
/// diagnosed) in a given context; see ParseTypeQualifierListOpt's AttrReqs.
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1, ///< GNU attributes are parsed.
AR_CXX11AttributesParsed = 1 << 2, ///< C++11 attributes are parsed.
AR_DeclspecAttributesParsed = 1 << 3, ///< __declspec attributes are parsed.
AR_AllAttributesParsed = AR_GNUAttributesParsed | ///< All of the above.
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed | ///< GNU + __declspec.
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// Locations and name for one level of a nested namespace declarator
/// (consumed by ParseInnerNamespace).
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
/// One entry per nesting level of the namespace declarator.
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// The parsed components of a using-declarator: an optional 'typename'
/// keyword, a nested-name-specifier, the name itself, and an optional
/// '...' (pack expansion).
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
// Reset every component so the object can be reused.
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
// Trailing expression following a ':' in the clause, when present
// (presumably e.g. the step of a 'linear' clause -- confirm at parse sites).
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
// Scope specifier and name of the reduction identifier for reduction clauses.
CXXScopeSpec ReductionIdScopeSpec;
DeclarationNameInfo ReductionId;
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
// Map-type modifiers and their source locations, for 'map' clauses.
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
MapTypeModifiersLoc;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
// True when no map type was written explicitly by the user.
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl();
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
md5.c | /*
* This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
* MD5 Message-Digest Algorithm (RFC 1321).
*
* Homepage:
* http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
*
* Author:
* Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
*
* This software was written by Alexander Peslyak in 2001. No copyright is
* claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*
* This differs from Colin Plumb's older public domain implementation in that
* no exactly 32-bit integer data type is required (any 32-bit or wider
* unsigned integer data type will do), there's no compile-time endianness
* configuration, and the function prototypes match OpenSSL's. No code from
* Colin Plumb's implementation has been reused; this comment merely compares
* the properties of the two independent implementations.
*
* The primary goals of this implementation are portability and ease of use.
* It is meant to be fast, but not as fast as possible. Some known
* optimizations are not included to reduce source code size and avoid
* compile-time configuration.
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#if defined(_OPENMP)
# include <omp.h>
# define MD5_PARALLELISM_DEGREE 4
#endif
#include "md5.h"
/*
* The basic MD5 functions.
*
* F and G are optimized compared to their RFC 1321 definitions for
* architectures that lack an AND-NOT instruction, just like in Colin Plumb's
* implementation.
*/
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
#define H(x, y, z) (((x) ^ (y)) ^ (z))
#define H2(x, y, z) ((x) ^ ((y) ^ (z)))
#define I(x, y, z) ((y) ^ ((x) | ~(z)))
/*
* The MD5 transformation for all four rounds.
*/
#define STEP(f, a, b, c, d, x, t, s) \
(a) += f((b), (c), (d)) + (x) + (t); \
(a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
(a) += (b);
/*
* SET reads 4 input bytes in little-endian byte order and stores them
* in a properly aligned word in host byte order.
*
* The check for little-endian architectures that tolerate unaligned
* memory accesses is just an optimization. Nothing will break if it
* doesn't work.
*/
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
#define SET(n) \
(*(MD5_u32plus *)&ptr[(n) * 4])
#define GET(n) \
SET(n)
#else
#define SET(n) \
(ctx->block[(n)] = \
(MD5_u32plus)ptr[(n) * 4] | \
((MD5_u32plus)ptr[(n) * 4 + 1] << 8) | \
((MD5_u32plus)ptr[(n) * 4 + 2] << 16) | \
((MD5_u32plus)ptr[(n) * 4 + 3] << 24))
#define GET(n) \
(ctx->block[(n)])
#endif
/*
* This processes one or more 64-byte data blocks, but does NOT update
* the bit counters. There are no alignment requirements.
*/
/*
 * Core MD5 compression (RFC 1321): consumes one or more complete 64-byte
 * blocks from `data` (size must be a nonzero multiple of 64) and folds them
 * into the chaining state ctx->a..d.  Bit counters are NOT updated here.
 * Returns a pointer just past the last byte consumed.
 */
static const void *body(MD5_CTX *ctx, const void *data, unsigned long size)
{
const unsigned char *ptr;
MD5_u32plus a, b, c, d;
MD5_u32plus saved_a, saved_b, saved_c, saved_d;
ptr = (const unsigned char *)data;
/* Work on local copies of the chaining state for the whole loop. */
a = ctx->a;
b = ctx->b;
c = ctx->c;
d = ctx->d;
do {
/* Remember the state entering this block for the final feed-forward. */
saved_a = a;
saved_b = b;
saved_c = c;
saved_d = d;
/* Round 1: SET(n) loads input word n (and, on the non-x86 path,
 * caches it in ctx->block for the GET(n) re-reads in rounds 2-4). */
STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
/* Round 2 */
STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
/* Round 3: H and H2 are the same XOR function, grouped differently so the
 * compiler can reuse the (y ^ z) subexpression across adjacent steps. */
STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)
/* Round 4 */
STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
/* Feed-forward: add the block's input state back in (RFC 1321). */
a += saved_a;
b += saved_b;
c += saved_c;
d += saved_d;
ptr += 64;
} while (size -= 64);
/* Write the chaining state back for the next call. */
ctx->a = a;
ctx->b = b;
ctx->c = c;
ctx->d = d;
return ptr;
}
/*
 * Prepares a context for a new digest: zeroes the message-length counters
 * and loads the RFC 1321 initial chaining values A, B, C, D.
 */
void MD5_Init(MD5_CTX *ctx)
{
	ctx->lo = 0;
	ctx->hi = 0;
	ctx->a = 0x67452301;
	ctx->b = 0xefcdab89;
	ctx->c = 0x98badcfe;
	ctx->d = 0x10325476;
}
/*
 * Absorbs `size` bytes of message data.  ctx->lo holds the byte count
 * modulo 2^29 (so lo << 3 in MD5_Final is the low 32 bits of the BIT
 * count); ctx->hi accumulates the overflow, i.e. bits 32..63 of the bit
 * count.  Partial blocks are buffered in ctx->buffer between calls.
 */
void MD5_Update(MD5_CTX *ctx, const void *data, unsigned long size)
{
MD5_u32plus saved_lo;
unsigned long used, available;
/* Update the 64-bit message length split across lo (29 bits) and hi. */
saved_lo = ctx->lo;
if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
ctx->hi++;
ctx->hi += size >> 29;
/* Bytes already buffered from a previous partial block (count mod 64). */
used = saved_lo & 0x3f;
if (used) {
available = 64 - used;
if (size < available) {
/* Still not a full block; just buffer and return. */
memcpy(&ctx->buffer[used], data, size);
return;
}
/* Complete the buffered block and process it. */
memcpy(&ctx->buffer[used], data, available);
data = (const unsigned char *)data + available;
size -= available;
body(ctx, ctx->buffer, 64);
}
if (size >= 64) {
/* Hash all remaining whole 64-byte blocks directly from the input. */
data = body(ctx, data, size & ~(unsigned long)0x3f);
size &= 0x3f;
}
/* Stash the trailing partial block (possibly 0 bytes) for later. */
memcpy(ctx->buffer, data, size);
}
/*
 * Finishes the digest: appends the 0x80 pad byte, zero-pads to 56 mod 64,
 * appends the 64-bit message length in bits (little-endian), processes the
 * final block(s), and writes the 16-byte digest little-endian into
 * `result`.  The context is zeroized afterwards and must be re-Init'ed
 * before reuse.
 */
void MD5_Final(MD5_CTX *ctx, unsigned char *result)
{
unsigned long used, available;
used = ctx->lo & 0x3f;
/* Mandatory pad byte. */
ctx->buffer[used++] = 0x80;
available = 64 - used;
if (available < 8) {
/* No room for the 8-byte length: pad out and hash an extra block. */
memset(&ctx->buffer[used], 0, available);
body(ctx, ctx->buffer, 64);
used = 0;
available = 64;
}
memset(&ctx->buffer[used], 0, available - 8);
/* Convert the byte count to a bit count; lo<<3 is the low 32 bits,
 * hi (which absorbed size>>29 in MD5_Update) is the high 32 bits. */
ctx->lo <<= 3;
ctx->buffer[56] = ctx->lo;
ctx->buffer[57] = ctx->lo >> 8;
ctx->buffer[58] = ctx->lo >> 16;
ctx->buffer[59] = ctx->lo >> 24;
ctx->buffer[60] = ctx->hi;
ctx->buffer[61] = ctx->hi >> 8;
ctx->buffer[62] = ctx->hi >> 16;
ctx->buffer[63] = ctx->hi >> 24;
body(ctx, ctx->buffer, 64);
/* Emit the digest: a, b, c, d each little-endian. */
result[0] = ctx->a;
result[1] = ctx->a >> 8;
result[2] = ctx->a >> 16;
result[3] = ctx->a >> 24;
result[4] = ctx->b;
result[5] = ctx->b >> 8;
result[6] = ctx->b >> 16;
result[7] = ctx->b >> 24;
result[8] = ctx->c;
result[9] = ctx->c >> 8;
result[10] = ctx->c >> 16;
result[11] = ctx->c >> 24;
result[12] = ctx->d;
result[13] = ctx->d >> 8;
result[14] = ctx->d >> 16;
result[15] = ctx->d >> 24;
/* Wipe intermediate state (note: a plain memset may be elided by the
 * optimizer; fine for non-secret data, use explicit_bzero for secrets). */
memset(ctx, 0, sizeof(*ctx));
}
/*
 * One-shot convenience wrapper: computes the MD5 digest of a single
 * in-memory buffer into result[16].
 */
void MD5_Data(const void *data, unsigned long size, unsigned char *result)
{
	MD5_CTX context;
	MD5_Init(&context);
	MD5_Update(&context, data, size);
	MD5_Final(&context, result);
}
static const size_t buflen = 8*1024;
/*
 * Computes the MD5 digest of the named file into result[MD5_OUTBYTES].
 * Returns 0 on success, -1 on failure (missing / non-regular / empty file,
 * open or read error, or allocation failure); result is zeroed on entry so
 * it holds all zeros on any failure path.
 */
int MD5_File(const char *filename, unsigned char *result)
{
	memset(result, 0, MD5_OUTBYTES);
	struct stat st;
	if (stat(filename, &st) != 0 || !S_ISREG(st.st_mode) || st.st_size == 0) {
		/* file does not exist, is not a regular file, or is empty */
		return -1;
	}
	FILE *f = fopen(filename, "rb");
	if (!f)
		return -1;
	unsigned char *buf = malloc(buflen);
	if (!buf) {
		/* original code dereferenced a NULL buffer on allocation failure */
		fclose(f);
		return -1;
	}
	MD5_CTX ctx;
	MD5_Init(&ctx);
	size_t n;	/* fread returns size_t; the old int could truncate */
	while ((n = fread(buf, 1, buflen, f)) > 0)
		MD5_Update(&ctx, buf, (unsigned long)n);
	/* Distinguish EOF from a read error instead of silently returning 0. */
	int rc = ferror(f) ? -1 : 0;
	if (rc == 0)
		MD5_Final(&ctx, result);
	free(buf);
	fclose(f);
	return rc;
}
static const long minlen = 1024*1024;
/*
 * Parallel file hash: splits the file into MD5_PARALLELISM_DEGREE ranges,
 * digests each range independently, then digests the concatenation of the
 * per-range digests.  NOTE: by construction the result differs from
 * MD5_File()'s plain MD5 of the whole file.  Returns 0 on success, -1 if
 * the file is missing, not regular, or empty.
 */
#ifndef MD5_PARALLELISM_DEGREE
/* Only defined above when _OPENMP is set; without this fallback the serial
 * path below failed to compile in non-OpenMP builds. */
#define MD5_PARALLELISM_DEGREE 4
#endif
int MD5_File_Parallel( const char *filename, unsigned char *result )
{
	memset(result, 0, MD5_OUTBYTES);
	struct stat st;
	if(stat(filename, &st)!=0 || !S_ISREG(st.st_mode) || st.st_size==0) {
		// file not exist || not regular file || empty file
		return -1;
	}
	/* Small files are not worth the thread / reopen overhead. */
	if(st.st_size < minlen) return MD5_File(filename, result);
	const size_t parallel_size = (st.st_size + MD5_PARALLELISM_DEGREE - 1) / MD5_PARALLELISM_DEGREE;
	unsigned char sum[MD5_PARALLELISM_DEGREE][MD5_OUTBYTES];
	/* Give failed workers a defined (all-zero) digest instead of reading
	 * uninitialized stack memory in the combining pass below. */
	memset(sum, 0, sizeof sum);
	MD5_CTX S[MD5_PARALLELISM_DEGREE][1];
#if defined(_OPENMP)
#pragma omp parallel shared(S) num_threads(MD5_PARALLELISM_DEGREE)
#else
	for( size_t id__ = 0; id__ < MD5_PARALLELISM_DEGREE; ++id__ )
#endif
	{
#if defined(_OPENMP)
		size_t id__ = omp_get_thread_num();
#endif
		FILE *file = fopen(filename, "rb");
		if(file)
		{
			MD5_Init(S[id__]);
			size_t read_pos = id__ * parallel_size;
			/* Last worker takes whatever remains (may be < parallel_size). */
			size_t read_len = (id__ == MD5_PARALLELISM_DEGREE-1) ? (st.st_size - read_pos) : (parallel_size);
			unsigned char *buf = malloc(buflen);
			if(buf && fseek(file, (long)read_pos, SEEK_SET) == 0)
			{
				/* Stop early on a short read instead of hashing stale
				 * buffer contents like the original did. */
				while(read_len >= buflen && fread(buf, buflen, 1, file) == 1)
				{
					MD5_Update( S[id__], buf, buflen);
					read_len -= buflen;
				}
				if(read_len > 0 && read_len < buflen &&
				   fread(buf, read_len, 1, file) == 1)
					MD5_Update( S[id__], buf, read_len);
				MD5_Final( S[id__], sum[id__]);
			}
			free(buf);
			fclose(file);
		}//end of if(file)
	}//end of for(MD5_PARALLELISM_DEGREE)
	/* Combine: hash the concatenated per-range digests. */
	MD5_CTX FS[1];
	MD5_Init(FS);
	for( size_t i = 0; i < MD5_PARALLELISM_DEGREE; ++i )
		MD5_Update( FS, sum[i], MD5_OUTBYTES );
	MD5_Final( FS, result );
	return 0;
}
|
GB_unop__ainv_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_bool_bool
// op(A') function: GB_unop_tran__ainv_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entry-wise: Cx [p] = op (cast (Ax [p])).  For
// AINV on bool the operator is the identity (see "unaryop: cij = aij"
// above), so this reduces to a parallel element-wise copy, which is also
// why aliasing Cx == Ax is safe.
GrB_Info GB_unop_apply__ainv_bool_bool
(
bool *Cx, // Cx and Ax may be aliased
const bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time via GB_control.h (GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// cij = op (cast (aij)); identity for boolean AINV
bool aij = Ax [p] ;
bool z = aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the actual transpose/typecast/apply work is done by
// the shared template included below (GB_unop_transpose.c), specialized by
// the GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__ainv_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2 of the two-phase transpose template
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tv_fista.h | #pragma once
#include <iostream>
#include <cmath>
#include <omp.h>
#include "Image.h"
//Written by Yngve Mardal Moe
//
//Functions used for TV denoising as part of the PDImage python package
//
// [1]: Beck, A & Teboulle, M - "Fast Gradient-Based Algorithms for Constrained
// Total Variation Image Denoising and Deblurring Problems."
// 2009, IEEE Transactions on Image Processing.
// [2]: O'Donoghue, B & Candes, E - "Adaptive Restart for Accelerated Gradient Schemes"
// 2012, Foundations of Computational Mathematics
using namespace std;
//Projection functions used in the FISTA iterations and proximal gradient step computations
void dual_projection(double* dual_x, double* dual_y, int y, int x)
{
	//Projects the dual variable of the ROF functional onto its feasible set:
	//each pair (dual_x[i], dual_y[i]) is scaled so its Euclidean norm is <= 1.
	//
	//:param dual_x: x-component of the dual variable, length y*x (modified in place)
	//:param dual_y: y-component of the dual variable, length y*x (modified in place)
	//:param y: Number of vertical pixels
	//:param x: Number of horizontal pixels
	int i;
#pragma omp parallel for
	for (i = 0; i < y*x; i++)
	{
		// Declared inside the loop so each OpenMP thread gets its own copy.
		// The original declared this temporary before the parallel region,
		// making it shared across threads -- a data race.
		double dual_norm = std::max(1., sqrt(dual_x[i] * dual_x[i] +
		                                     dual_y[i] * dual_y[i]));
		dual_x[i] /= dual_norm;
		dual_y[i] /= dual_norm;
	}
}
void primal_projection(double* image, double min_value, double max_value, int y, int x)
{
	//Projects the primal ROF variable onto its feasible set by clamping
	//every pixel of :image: into [min_value, max_value], in place.
	int i;
#pragma omp parallel for
	for (i = 0; i < y*x; i++)
	{
		double v = image[i];
		if (v < min_value)
			v = min_value;
		else if (v > max_value)
			v = max_value;
		image[i] = v;
	}
}
void primal_off_projection(double* image, double* raw, double gamma, double min_value, double max_value, int y, int x)
{
//The projection off the feasible set for the ROF functional: pixels inside
//[min_value, max_value] are zeroed; out-of-range pixels keep only an
//offset relative to the violated bound.
//:param raw: and :param gamma: are unused here; presumably kept for a
//signature uniform with the other projection helpers.
//
//NOTE(review): `image[i] -= -min_value` ADDS min_value (and likewise for
//max_value below).  For the common case min_value == 0 this is identical
//to subtracting, but for nonzero bounds the sign looks inverted relative
//to x - P_C(x) as used in [1] -- confirm the intended formula.
int i;
for (i = 0; i < y*x; i++)
{
if (image[i] < min_value)
{
image[i] -= -min_value;
}
else if (image[i] > max_value)
{
image[i] -= -max_value;
}
else
{
// in-range pixel: no off-projection component
image[i] = 0;
}
}
}
//Functions used to compute the proximal gradient steps of the minimization target functions
//for the ROF problem. They are also used to go from the dual to primal problem.
void sub_diff_2d_x(double* dual_x, double* image, int y, int x)
{
	//Accumulates minus the forward finite-difference x-derivative of :image:
	//into :dual_x:, i.e. dual_x[i,j] += image[i,j] - image[i,j+1].
	//The last column of each row receives no contribution.
	for (int row = 0; row < y; row++)
	{
		double* img_row = image + row * x;
		double* dst_row = dual_x + row * x;
		for (int col = 0; col + 1 < x; col++)
			dst_row[col] += img_row[col] - img_row[col + 1];
	}
}
void sub_diff_2d_y(double* dual_y, double* image, int y, int x)
{
	//Accumulates minus the forward finite-difference y-derivative of :image:
	//into :dual_y:, i.e. dual_y[i,j] += image[i,j] - image[i+1,j].
	//The last row receives no contribution.
	for (int row = 0; row + 1 < y; row++)
	{
		for (int col = 0; col < x; col++)
		{
			int idx = row * x + col;
			dual_y[idx] += image[idx] - image[idx + x];
		}
	}
}
void gradient_2d(double* dual_x, double* dual_y, double* image, int y, int x)
{
	//Accumulates the (negated) forward finite-difference derivatives of
	//:image: into :dual_x: (x-direction) and :dual_y: (y-direction).
	//The two accumulations write to disjoint buffers and are independent.
	sub_diff_2d_y(dual_y, image, y, x);
	sub_diff_2d_x(dual_x, image, y, x);
}
void divergence_2d(double* image, double* dual_x, double* dual_y, int y, int x)
{
	//Accumulates the backward finite-difference divergence of the dual field
	//(dual_x = x-component, dual_y = y-component) into :image:.  Terms that
	//would index before the first row or column are simply omitted, which
	//reproduces the four boundary cases of the straightforward formulation.
	for (int row = 0; row < y; row++)
	{
		for (int col = 0; col < x; col++)
		{
			int idx = row * x + col;
			double div = dual_x[idx] + dual_y[idx];
			if (col != 0)
				div -= dual_x[idx - 1];
			if (row != 0)
				div -= dual_y[idx - x];
			image[idx] += div;
		}
	}
}
double TV_norm(double* image, int y, int x)
{
	//Computes the isotropic total-variation norm of :image: using forward
	//finite differences; pixels in the last column/row contribute a zero
	//x/y difference respectively.
	//
	//:param image: pixel data, length y*x, row-major
	//:param y: Number of vertical pixels
	//:param x: Number of horizontal pixels
	//:return: sum over pixels of sqrt(dx^2 + dy^2)
	//
	//The original allocated two y*x scratch arrays with new[] just to hold
	//the differences; computing them on the fly gives identical values and
	//summation order without the heap traffic.
	double norm = 0;
	for (int row = 0; row < y; row++)
	{
		for (int col = 0; col < x; col++)
		{
			int idx = row * x + col;
			double gx = (col + 1 < x) ? image[idx] - image[idx + 1] : 0.0;
			double gy = (row + 1 < y) ? image[idx] - image[idx + x] : 0.0;
			norm += sqrt(gx * gx + gy * gy);
		}
	}
	return norm;
}
//The minimization target functions for the ROF problem
double TV_problem(double* image, double* raw, double gamma, int y, int x)
{
	//Value of the ROF objective min ||I - b||^2 + g*||I||_TV for the
	//current iterate, where b is the noisy input and g the regularization
	//constant (noise level).
	//
	//:param image: = I (current iterate)
	//:param raw: = b (image to denoise)
	//:param gamma: = g (regularization constant)
	//:param y: = Number of vertical pixels
	//:param x: = Number of horizontal pixels
	double fidelity = sum_squared_error(image, raw, y, x);
	double regularizer = gamma * TV_norm(image, y, x);
	return fidelity + regularizer;
}
double TV_deconvolve_problem(double* image, double*decon_image, double*raw, double gamma, int y, int x)
{
	//Value of the TV deconvolution objective min ||F*I - b||^2 + g*||I||_TV,
	//where F* is the convolution operator to invert, b the image to
	//deconvolve, and g the regularization constant (noise level).
	//
	//:param image: = I (current iterate)
	//:param decon_image: = F*I
	//:param raw: = b
	//:param gamma: = g
	//:param y: = Number of vertical pixels
	//:param x: = Number of horizontal pixels
	double fidelity = sum_squared_error(image, raw, y, x);
	double regularizer = gamma * TV_norm(decon_image, y, x);
	return fidelity + regularizer;
}
double dual_problem(double* dual_x, double* dual_y, double* image, double* raw, double gamma,
double min_value, double max_value, int y, int x)
{
//Returns the value of the dual problem described in [1].
//:image: is used purely as scratch space and is overwritten.
double problem_value = 0;
// Rebuild the primal candidate from the dual field: image := gamma*div(dual),
// then combine with :raw: via subtract_image_first_from_second (from Image.h;
// presumably image := raw - image -- confirm against Image.h).
set_zero(image, y, x);
divergence_2d(image, dual_x, dual_y, y, x);
multiply_image(image, gamma, y, x);
subtract_image_first_from_second(image, raw, y, x);
problem_value += sum_squares(image, y, x);
// Add the squared norm of the component of the candidate lying outside
// [min_value, max_value] (the "off-projection" term of the dual objective).
primal_off_projection(image, raw, gamma, min_value, max_value, y, x);
problem_value += sum_squares(image, y, x);
return problem_value;
}
//The function to get from the dual variables to primal variables
void dual_to_image(double* image, double* dual_x, double* dual_y, double* raw, double gamma,
double min_value, double max_value, int y, int x)
{
//Computes what image to create from the dual variables, and stores it in :*image:, using equation 4.4 from [1]
set_zero(image, y, x);
divergence_2d(image, dual_x, dual_y, y, x);
multiply_image(image, gamma, y, x);
subtract_image_first_from_second(image, raw, y, x);
primal_projection(image, min_value, max_value, y, x);
}
//The proximal gradient step for the ROF dual
void proximal_step(double* dual_x, double* dual_y, double* image, double* raw, double gamma,
double min_value, double max_value, int y, int x)
{
//Performs a proximal step of the dual variable as described in [1].
dual_to_image(image, dual_x, dual_y, raw, gamma, min_value, max_value, y, x);
multiply_image(image, 1 / (8 * gamma), y, x);
gradient_2d(dual_x, dual_y, image, y, x);
dual_projection(dual_x, dual_y, y, x);
}
//The FISTA function.
void TV_FISTA(double* image, double* raw, double gamma, double min_value, double max_value,
int max_it, double eps, int y, int x)
{
//Performs FISTA iterations for the TV problem, with a scheme for restarting the momentum term as described in [1] and [2]
//:param image: on entry an initial iterate / scratch; on exit the denoised image
//:param raw: the noisy input image
//:param gamma: regularization constant (noise level)
//:param min_value:, :param max_value: feasible pixel range
//:param max_it: maximum number of iterations
//:param eps: relative-decrease convergence tolerance
//All work buffers are value-initialized (the trailing () zeroes them).
double* dual_xn = new double[x*y]();
double* dual_yn = new double[x*y]();
double* dual_xn_1 = new double[x*y]();
double* dual_yn_1 = new double[x*y]();
double* dual_x_momentum = new double[x*y]();
double* dual_y_momentum = new double[x*y]();
double* temp_x;
double* temp_y;
double t = 1;
double t_1 = 1;
double fn;
double fn_1;
int n;
fn = dual_problem(dual_xn, dual_yn, image, raw, gamma, min_value, max_value, y, x);
for (n = 0; n < max_it; n++)
{
fn_1 = fn;
// Set previous variable
copy_image(dual_xn_1, dual_xn, y, x); //xn-1 = xn
copy_image(dual_yn_1, dual_yn, y, x);
// Update current variable: the momentum buffers hold the extrapolated
// point r_n going into the step, and receive the new iterate in place.
proximal_step(dual_x_momentum, dual_y_momentum, image, raw, gamma, min_value, max_value, y, x);
// Pointer swap: no copying; after this, dual_*n is the new iterate and
// dual_*_momentum is free to be reused as the next extrapolation buffer.
temp_x = dual_xn; //temp_x points to previous iteration
temp_y = dual_yn;
dual_xn = dual_x_momentum; //dual_xn points to current iteration
dual_yn = dual_y_momentum;
dual_x_momentum = temp_x; //dual_x_momentum points to temp_x, which points to previous iteration
dual_y_momentum = temp_y;
fn = dual_problem(dual_xn, dual_yn, image, raw, gamma, min_value, max_value, y, x);
// Test for reseting the momentum: if the objective increased, restart
// per [2].  With t = t_1 = 1 the weight below becomes (t_1 - 1)/t = 0,
// so the next extrapolation point is the plain iterate.
if (fn_1 - fn < 0)
{
t = 1;
t_1 = 1;
}
// Test for convergence (relative decrease; note this is skipped on a
// restart iteration because of the else-if).
else if (fn_1 - fn < eps*fn)
{
break;
}
// Update momentum multiplier (standard FISTA t-sequence)
t_1 = t;
t = (1 + sqrt(1 + 4 * t*t)) / 2;
// Update momentum term
copy_image(dual_x_momentum, dual_xn, y, x); // dual_x_m = dual_xn
copy_image(dual_y_momentum, dual_yn, y, x);
subtract_image_second_from_first(dual_x_momentum, dual_xn_1, y, x); //dual_x_m = dual_xn - dual_xn_1
subtract_image_second_from_first(dual_y_momentum, dual_yn_1, y, x);
multiply_image(dual_x_momentum, (t_1 - 1) / t, y, x); //r = ((t_1 - 1) / t) * (pn - pn_1)
multiply_image(dual_y_momentum, (t_1 - 1) / t, y, x);
add_image(dual_x_momentum, dual_xn, y, x); //r_n = pn + ((t_1 - 1) / t) * (pn - pn_1)
add_image(dual_y_momentum, dual_yn, y, x);
}
// Defensive nulling of the raw swap pointers (they alias buffers freed below).
temp_x = NULL;
temp_y = NULL;
// Convert the final dual iterate into the denoised primal image.
dual_to_image(image, dual_xn, dual_yn, raw, gamma, min_value, max_value, y, x);
delete[] dual_xn;
delete[] dual_yn;
delete[] dual_xn_1;
delete[] dual_yn_1;
delete[] dual_x_momentum;
delete[] dual_y_momentum;
} |
common.c | /****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#include "common.h"
#define CONF95 1.96
int nthreads = -1; // Number of OpenMP threads
int delaylength = -1; // The number of iterations to delay for
int outerreps = -1; // Outer repetitions
double delaytime = -1.0; // Length of time to delay for in microseconds
double targettesttime = 0.0; // The length of time in microseconds that the test
// should run for.
unsigned long innerreps; // Inner repetitions
double *times; // Array of doubles storing the benchmark times in microseconds
double referencetime; // The average reference time in microseconds to perform
// outerreps runs
double referencesd; // The standard deviation in the reference time in
// microseconds for outerreps runs.
double testtime; // The average test time in microseconds for
// outerreps runs
double testsd; // The standard deviation in the test time in
// microseconds for outerreps runs.
void usage(char *argv[]) {
    // Print the command-line synopsis.  argv[0] supplies the program name;
    // the DEFAULT_* values come from common.h.
    printf("Usage: %s.x \n"
           "\t--outer-repetitions <outer-repetitions> (default %d)\n"
           "\t--test-time <target-test-time> (default %0.2f microseconds)\n"
           "\t--delay-time <delay-time> (default %0.4f microseconds)\n"
           "\t--delay-length <delay-length> "
           "(default auto-generated based on processor speed)\n",
           argv[0], DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME,
           DEFAULT_DELAY_TIME);
}
void parse_args(int argc, char *argv[]) {
// Parse the parameters
int arg;
for (arg = 1; arg < argc; arg++) {
if (strcmp(argv[arg], "--delay-time") == 0.0) {
delaytime = atof(argv[++arg]);
if (delaytime == 0.0) {
printf("Invalid float:--delay-time: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[arg], "--outer-repetitions") == 0) {
outerreps = atoi(argv[++arg]);
if (outerreps == 0) {
printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[arg], "--test-time") == 0) {
targettesttime = atof(argv[++arg]);
if (targettesttime == 0) {
printf("Invalid integer:--test-time: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[arg], "-h") == 0) {
usage(argv);
exit(EXIT_SUCCESS);
} else {
printf("Invalid parameters: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
}
}
int getdelaylengthfromtime(double delaytime) {
    // Calibrate the number of busy-wait iterations (global delaylength)
    // needed so that one call to delay() lasts about :delaytime:
    // microseconds on this machine.  Returns the calibrated iteration count.
    const int reps = 1000;              // timing samples per candidate length
    double target = delaytime / 1.0E6;  // local target in seconds; the
                                        // caller's value is left untouched
    double elapsed = 0.0;
    double t0;
    int i;

    // Start from zero iterations and grow geometrically (x1.1 + 1) until the
    // average duration of delay(delaylength) reaches the target.
    delaylength = 0;
    delay(delaylength);                 // warm-up call before timing
    while (elapsed < target) {
        delaylength = delaylength * 1.1 + 1;
        t0 = getclock();
        for (i = 0; i < reps; i++) {
            delay(delaylength);
        }
        elapsed = (getclock() - t0) / (double) reps;
    }
    return delaylength;
}
unsigned long getinnerreps(void (*test)(void)) {
    // Determine how many inner repetitions (global innerreps) are needed so
    // that one invocation of test() runs for at least targettesttime
    // microseconds.  test() is expected to read the global innerreps.
    innerreps = 1L; // some initial value
    double time = 0.0;

    double start = getclock();
    test();
    time = (getclock() - start) * 1.0e6;   // elapsed time in microseconds

    if (time > targettesttime) {
        return innerreps;
    }

    while (time < targettesttime) {
        double start = getclock();
        test();
        time = (getclock() - start) * 1.0e6;
        // NOTE(review): innerreps is doubled AFTER the measurement, so the
        // returned value is twice the count that was last timed.
        innerreps *=2;
        // Test to stop code if compiler is optimising reference time expressions away
        if (innerreps > (targettesttime*1.0e15)) {
            printf("Compiler has optimised reference loop away, STOP! \n");
            printf("Try recompiling with lower optimisation level \n");
            exit(1);
        }
    }
    return innerreps;
}
void printheader(char *name) {
    // Announce the start of a timed section: blank line, separator rule,
    // then the benchmark name and the inner-repetition count in use.
    printf("\n"
           "--------------------------------------------------------\n"
           "Computing %s time using %lu reps\n",
           name, innerreps);
}
void stats(double *mtp, double *sdp) {
    // Compute and print summary statistics over the global times[] array:
    // mean, min, max, sample standard deviation, and the number of outliers
    // (samples further than 3 s.d. from the mean).  The mean is returned in
    // *mtp and the standard deviation in *sdp.
    // Note: only entries 1..outerreps are used; times[0] (the first,
    // warm-up, measurement) is deliberately excluded.
    double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff;

    int i, nr;

    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;

    for (i = 1; i <= outerreps; i++) {
        mintime = (mintime < times[i]) ? mintime : times[i];
        maxtime = (maxtime > times[i]) ? maxtime : times[i];
        totaltime += times[i];
    }

    meantime = totaltime / outerreps;
    sumsq = 0;

    for (i = 1; i <= outerreps; i++) {
        sumsq += (times[i] - meantime) * (times[i] - meantime);
    }
    // Sample standard deviation (divides by outerreps-1); assumes
    // outerreps > 1 — TODO confirm callers never configure a single rep.
    sd = sqrt(sumsq / (outerreps - 1));

    cutoff = 3.0 * sd;   // 3-sigma outlier threshold

    nr = 0;

    for (i = 1; i <= outerreps; i++) {
        if (fabs(times[i] - meantime) > cutoff)
            nr++;
    }

    printf("\n");
    printf("Sample_size Average Min Max S.D. Outliers\n");
    printf(" %d %f %f %f %f %d\n",
           outerreps, meantime, mintime, maxtime, sd, nr);
    printf("\n");

    *mtp = meantime;
    *sdp = sd;
}
void printfooter(char *name, double testtime, double testsd,
        double referencetime, double refsd) {
    // Report the benchmark time and its overhead relative to the reference,
    // each with a 95% confidence half-width (CONF95 * s.d.).
    //
    // Fix: the original ignored the refsd parameter and read the global
    // referencesd instead; the parameter is now honoured so the function's
    // output matches the arguments it is given.
    printf("%s time = %f microseconds +/- %f\n",
           name, testtime, CONF95*testsd);
    printf("%s overhead = %f microseconds +/- %f\n",
           name, testtime-referencetime, CONF95*(testsd+refsd));
}
void printreferencefooter(char *name, double referencetime, double referencesd) {
    // Report the reference time with its 95% confidence half-width.
    double halfwidth = CONF95 * referencesd;
    printf("%s time = %f microseconds +/- %f\n", name, referencetime, halfwidth);
}
void init(int argc, char **argv)
{
    // Initialise the benchmark harness: detect the OpenMP thread count,
    // parse the command line, apply defaults, calibrate the delay length,
    // allocate the timing array and print the run configuration.
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    parse_args(argc, argv);

    if (outerreps == -1) {
        outerreps = DEFAULT_OUTER_REPS;
    }
    if (targettesttime == 0.0) {
        targettesttime = DEFAULT_TEST_TARGET_TIME;
    }
    if (delaytime == -1.0) {
        delaytime = DEFAULT_DELAY_TIME;
    }
    // Only calibrate when no explicit delay length was supplied (the
    // original unconditionally recomputed it, discarding any user value).
    if (delaylength == -1) {
        delaylength = getdelaylengthfromtime(delaytime);
    }

    // times[0] is a warm-up slot; stats() uses entries 1..outerreps.
    times = malloc((outerreps + 1) * sizeof *times);
    if (times == NULL) {
        // Fix: the original never checked the allocation.
        fprintf(stderr, "Failed to allocate timing array\n");
        exit(EXIT_FAILURE);
    }

    // NOTE(review): banner says 3.0 while the file header says suite 3.1 —
    // left unchanged to preserve output compatibility.
    printf("Running OpenMP benchmark version 3.0\n"
           "\t%d thread(s)\n"
           "\t%d outer repetitions\n"
           "\t%0.2f test time (microseconds)\n"
           "\t%d delay length (iterations) \n"
           "\t%f delay time (microseconds)\n",
           nthreads,
           outerreps, targettesttime,
           delaylength, delaytime);
}
void finalise(void) {
    // Release the timing array.  The pointer is reset so a stray second
    // call to finalise() is a harmless free(NULL) instead of a double free.
    free(times);
    times = NULL;
}
void initreference(char *name) {
    // Print the header announcing a reference-time measurement.
    printheader(name);
}
/* Calculate the reference time. */
// Times the reference kernel :refer: outerreps+1 times (index 0 is the
// warm-up slot excluded by stats()) and stores the per-innerrep time in
// microseconds into the global times[] array, then prints the statistics.
void reference(char *name, void (*refer)(void)) {
    int k;
    double start;

    // Calculate the required number of innerreps (also sets the global)
    innerreps = getinnerreps(refer);

    initreference(name);

    for (k = 0; k <= outerreps; k++) {
        start = getclock();
        refer();
        // time per inner repetition, in microseconds
        times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
    }

    finalisereference(name);
}
void finalisereference(char *name) {
    // Compute the reference-time statistics (stored in the globals
    // referencetime/referencesd) and print them.
    stats(&referencetime, &referencesd);
    printreferencefooter(name, referencetime, referencesd);
}
// Print the header announcing a test measurement.
// NOTE(review): name is a typo of "inittest" but is kept — it is part of
// the public interface and is called from benchmark().
void intitest(char *name) {
    printheader(name);
}
void finalisetest(char *name) {
    // Compute the test-time statistics (globals testtime/testsd) and print
    // the footer, including the overhead versus the reference time.
    stats(&testtime, &testsd);
    printfooter(name, testtime, testsd, referencetime, referencesd);
}
/* Function to run a microbenchmark test*/
// Runs :test: outerreps+1 times and records each run's time per inner
// repetition (microseconds) into the global times[] array, then prints the
// statistics.  The inner-repetition count is taken as the maximum over four
// independent calibrations to reduce the chance of an unluckily short one.
void benchmark(char *name, void (*test)(void))
{
    int k;
    double start;

    // Calculate the required number of innerreps.  Each call also updates
    // the global innerreps as a side effect, so after the fourth call the
    // global already holds innerreps4; the max-folds below then fold in the
    // other three candidates.
    unsigned long innerreps1 = getinnerreps(test);
    unsigned long innerreps2 = getinnerreps(test);
    unsigned long innerreps3 = getinnerreps(test);
    unsigned long innerreps4 = getinnerreps(test);
    innerreps = innerreps1 > innerreps ? innerreps1 : innerreps;
    innerreps = innerreps2 > innerreps ? innerreps2 : innerreps;
    innerreps = innerreps3 > innerreps ? innerreps3 : innerreps;
    innerreps = innerreps4 > innerreps ? innerreps4 : innerreps;

    intitest(name);

    for (k=0; k<=outerreps; k++) {
        start = getclock();
        test();
        times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
    }

    finalisetest(name);
}
// For the Cray compiler on HECToR we need to turn off optimisation
// for the delay and array_delay functions. Other compilers should
// not be afffected.
#pragma _CRI noopt
// Busy-wait for :delaylength: iterations of trivial float arithmetic.
// Sits inside the _CRI noopt region above: the loop must not be optimised
// away.  The final test can never fire (a only accumulates non-negative
// values) but makes :a: observable so the compiler keeps the loop.
void delay(int delaylength) {

    int i;
    float a = 0.;

    for (i = 0; i < delaylength; i++)
        a += i;
    if (a < 0)
        printf("%f \n", a);

}
// Busy-wait variant that accumulates into caller-provided storage a[0]
// (initialised to 1.0 here), so the work is observable through memory and
// cannot be optimised away.  The final test can never fire since a[0] only
// grows, but it keeps the store live.
void array_delay(int delaylength, double a[1]) {
    int k = 0;

    a[0] = 1.0;
    while (k < delaylength) {
        a[0] += k;
        k++;
    }
    if (a[0] < 0) {
        printf("%f \n", a[0]);
    }
}
// Re-enable optimisation for remainder of source.
#pragma _CRI opt
// Linux-specific, but much more precise than time()-based clocks:
// returns a monotonic wall-clock timestamp in seconds with nanosecond
// resolution, suitable for differencing.
double getclock() {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (double) ts.tv_sec + (double) ts.tv_nsec * 1.0e-9;
}
// Always returns 0 (false).  Presumably kept as an out-of-line call so the
// optimiser cannot fold conditions using it — TODO confirm intent.
int returnfalse() {
    return 0;
}
|
mixed_tentusscher_myo_epi_2004_S1_13.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S1_13.h"
// Report the cell model's metadata to the solver: the resting membrane
// potential (INITIAL_V, millivolts) and the number of ODE state variables
// (NEQ).  The macro-provided flags get_initial_v / get_neq select which
// fields of cell_model the caller wants filled in.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
// Fill the state vector sv[0..NEQ-1] of one cell with steady-state initial
// conditions.  The cell type is chosen by the caller-supplied mask passed in
// extra_data: mapping[sv_id] == 0 selects myocardium, anything else selects
// epicardium.  Aborts if no mask was provided, since a mixed model cannot
// decide the cell type on its own.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array (per-cell type mask supplied via extra_data)
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions (kept for reference)
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions (pre-paced model state)
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions (kept for reference)
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions (pre-paced model state)
        real sv_sst[]={-86.6124649455479,0.00127794843601506,0.780792340373146,0.780621416430051,0.000173708999922449,0.485550218750528,0.00293105929439211,0.999998362618276,1.91930561072561e-08,1.87995323300123e-05,0.999771109571080,1.00717919024407,0.999996460509174,4.32012539733253e-05,0.681225232256513,9.64639490171753,139.835052468258};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}
// Advance every requested cell by num_steps sub-steps of size dt, in
// parallel over cells.  Each cell's type (myocardium vs epicardium) is
// selected by the mask supplied in extra_data.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array (per-cell type mask supplied via extra_data)
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            // Fix: index the mask by the cell's state-vector id, not by the
            // loop index.  When cells_to_solve remaps i -> sv_id the two
            // differ, and set_model_initial_conditions_cpu (same file)
            // already selects the cell type with mapping[sv_id]; using
            // mapping[i] here applied the wrong model to remapped cells.
            if (mapping[sv_id] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    // One time step of the myocardium model: snapshot the current state,
    // evaluate the update rule, then write the new state back into sv.
    real state[NEQ], next[NEQ];

    for(int k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_myo(state, next, stim_current, dt);

    for(int k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
// Right-hand side / update rule for the TenTusscher 2004 MYOCARDIUM cell
// model.  NOTE(review): despite the name, rDY_ does not receive pure time
// derivatives — gates are written as their exponentially-integrated values
// (Rush-Larsen style), the voltage as a forward-Euler update, and the
// concentrations as their new values; the caller simply copies rDY_ into sv.
//  sv  : current state (V, gates m/h/j/xr1/xr2/xs/s/r/d/f/fca/g, Cai, CaSR, Nai, Ki)
//  rDY_: updated state written here
//  stim_current: external stimulus (sign convention: added to sItot)
//  dt  : time step, also used inside the gate/concentration updates
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];   // membrane potential (mV)
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Per-current and scratch variables
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst/reversal potentials)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // New CaSR from the quadratic of the buffering equilibrium
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    // New Cai from the quadratic of the buffering equilibrium
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential/Rush-Larsen integration towards steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates: only allowed to decrease while depolarised (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    // One time step of the epicardium model: snapshot the current state,
    // evaluate the update rule, then write the new state back into sv.
    real state[NEQ], next[NEQ];

    for(int k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_epi(state, next, stim_current, dt);

    for(int k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
// Right-hand side / update rule for the TenTusscher 2004 EPICARDIUM cell
// model with per-scenario tuned conductances (parameters[] below overrides
// the published defaults).  Same integration convention as RHS_cpu_myo:
// rDY_ receives updated state values, not pure derivatives.
//  sv  : current state (V, gates, Cai, CaSR, Nai, Ki)
//  rDY_: updated state written here
//  stim_current: external stimulus (added to sItot)
//  dt  : time step, also used inside the gate/concentration updates
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];   // membrane potential (mV)
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Scenario-specific tuned conductances; these overwrite the defaults
    // assigned above (the defaults are kept for readability/reference).
    real parameters []={13.9878532723791,0.000381859667002297,0.000164377584172001,0.000429492452900182,0.281717671480526,0.172178664836313,0.158009524014960,3.54321400489854,0.0185670643252902,2.13545487708985,1099.99990980037,0.000491721845343899,0.419354711210666,0.0199628106883488,0.00141401145930471,3.06197556760024e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];   // SR release scaling (replaces 0.016464 of myo)
    real crel=parameters[14];   // SR release offset  (replaces 0.008232 of myo)
    real Vleak=parameters[15];  // SR leak rate       (replaces 0.00008 of myo)

    // Per-current and scratch variables
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst/reversal potentials)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // New CaSR from the quadratic of the buffering equilibrium
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    // New Cai from the quadratic of the buffering equilibrium
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential/Rush-Larsen integration towards steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates: only allowed to decrease while depolarised (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
irbuilder_unroll_partial_heuristic_runtime_for.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
double sind(double);
// CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_runtime_for(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[N_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[E_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[OFFSET_ADDR:.+]] = alloca float, align 4
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32 %[[N:.+]], i32* %[[N_ADDR]], align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store float* %[[E:.+]], float** %[[E_ADDR]], align 8
// CHECK-NEXT: store float %[[OFFSET:.+]], float* %[[OFFSET_ADDR]], align 4
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 1
// CHECK-NEXT: store i32* %[[N_ADDR]], i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP2:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[TMP2]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: %[[TMP4:.+]] = udiv i32 %[[DOTCOUNT]], 4
// CHECK-NEXT: %[[TMP5:.+]] = urem i32 %[[DOTCOUNT]], 4
// CHECK-NEXT: %[[TMP6:.+]] = icmp ne i32 %[[TMP5]], 0
// CHECK-NEXT: %[[TMP7:.+]] = zext i1 %[[TMP6]] to i32
// CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP4]], %[[TMP7]]
// CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]:
// CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP8:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1
// CHECK-NEXT: store i32 %[[TMP8]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[TMP11:.+]] = sub i32 %[[TMP10]], %[[TMP9]]
// CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[TMP11]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_HEADER]]:
// CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_COND]]:
// CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP12]]
// CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_BODY]]:
// CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP9]]
// CHECK-NEXT: %[[TMP14:.+]] = icmp eq i32 %[[TMP13]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: %[[TMP15:.+]] = select i1 %[[TMP14]], i32 %[[TMP5]], i32 4
// CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_HEADER]]:
// CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_COND]]:
// CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP15]]
// CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_BODY]]:
// CHECK-NEXT: %[[TMP16:.+]] = mul nuw i32 4, %[[TMP13]]
// CHECK-NEXT: %[[TMP17:.+]] = add nuw i32 %[[TMP16]], %[[OMP_TILE0_IV]]
// CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP17]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP18:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP19]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP18]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP20:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[CONV:.+]] = fpext float %[[TMP20]] to double
// CHECK-NEXT: %[[CALL:.+]] = call double @sind(double noundef %[[CONV]])
// CHECK-NEXT: %[[TMP21:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP22:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP22]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP21]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP23:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[CONV4:.+]] = fpext float %[[TMP23]] to double
// CHECK-NEXT: %[[MUL:.+]] = fmul double %[[CALL]], %[[CONV4]]
// CHECK-NEXT: %[[TMP24:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP25:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM5:.+]] = sext i32 %[[TMP25]] to i64
// CHECK-NEXT: %[[ARRAYIDX6:.+]] = getelementptr inbounds float, float* %[[TMP24]], i64 %[[IDXPROM5]]
// CHECK-NEXT: %[[TMP26:.+]] = load float, float* %[[ARRAYIDX6]], align 4
// CHECK-NEXT: %[[CONV7:.+]] = fpext float %[[TMP26]] to double
// CHECK-NEXT: %[[MUL8:.+]] = fmul double %[[MUL]], %[[CONV7]]
// CHECK-NEXT: %[[TMP27:.+]] = load float*, float** %[[E_ADDR]], align 8
// CHECK-NEXT: %[[TMP28:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM9:.+]] = sext i32 %[[TMP28]] to i64
// CHECK-NEXT: %[[ARRAYIDX10:.+]] = getelementptr inbounds float, float* %[[TMP27]], i64 %[[IDXPROM9]]
// CHECK-NEXT: %[[TMP29:.+]] = load float, float* %[[ARRAYIDX10]], align 4
// CHECK-NEXT: %[[CONV11:.+]] = fpext float %[[TMP29]] to double
// CHECK-NEXT: %[[MUL12:.+]] = fmul double %[[MUL8]], %[[CONV11]]
// CHECK-NEXT: %[[TMP30:.+]] = load float, float* %[[OFFSET_ADDR]], align 4
// CHECK-NEXT: %[[CONV13:.+]] = fpext float %[[TMP30]] to double
// CHECK-NEXT: %[[ADD:.+]] = fadd double %[[MUL12]], %[[CONV13]]
// CHECK-NEXT: %[[TMP31:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP32:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM14:.+]] = sext i32 %[[TMP32]] to i64
// CHECK-NEXT: %[[ARRAYIDX15:.+]] = getelementptr inbounds float, float* %[[TMP31]], i64 %[[IDXPROM14]]
// CHECK-NEXT: %[[TMP33:.+]] = load float, float* %[[ARRAYIDX15]], align 4
// CHECK-NEXT: %[[CONV16:.+]] = fpext float %[[TMP33]] to double
// CHECK-NEXT: %[[ADD17:.+]] = fadd double %[[CONV16]], %[[ADD]]
// CHECK-NEXT: %[[CONV18:.+]] = fptrunc double %[[ADD17]] to float
// CHECK-NEXT: store float %[[CONV18]], float* %[[ARRAYIDX15]], align 4
// CHECK-NEXT: br label %[[OMP_TILE0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_INC]]:
// CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_INC]]:
// CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_EXIT]]:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM19:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM19]])
// CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// Test kernel: a worksharing loop combined with a heuristic partial unroll.
// The trip count 'n' is only known at runtime, so the unroll factor is chosen
// by the IRBuilder heuristic (the autogenerated checks above expect factor 4:
// udiv/urem by 4 and the "llvm.loop.unroll.count, i32 4" metadata).
// NOTE(review): the CHECK lines were produced by update_cc_test_checks.py —
// keep this function's source exactly as-is, or regenerate the checks.
void unroll_partial_heuristic_runtime_for(int n, float *a, float *b, float *c, float *d, float *e, float offset) {
#pragma omp for
#pragma omp unroll partial
for (int i = 0; i < n; i++) {
a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;
}
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 1
// CHECK-NEXT: %[[TMP5:.+]] = load i32*, i32** %[[TMP4]], align 8
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[TMP5]], align 4
// CHECK-NEXT: store i32 %[[TMP6]], i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP7]], %[[TMP8]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP9]], %[[TMP10]]
// CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[SUB]], %[[TMP11]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP12:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP12]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 4}
|
custom_data_storage.c | // RUN: %libomp-tool -DFIRST_TOOL -o %t.first.tool.so %s && \
// RUN: %libomp-tool -DSECOND_TOOL -o %t.second.tool.so %s && \
// RUN: %libomp-compile && \
// RUN: env OMP_TOOL_LIBRARIES=%t.first.tool.so \
// RUN: CUSTOM_DATA_STORAGE_TOOL_LIBRARIES=%t.second.tool.so \
// RUN: %libomp-run | %sort-threads | FileCheck %s
// For GCC we don't get an event for master,
// see runtime/test/ompt/synchronization/master.c
// UNSUPPORTED: gcc
#if defined(FIRST_TOOL)
#include "first-tool.h"
#elif defined(SECOND_TOOL)
#include "second-tool.h"
#else /* APP */
#include "../ompt-signal.h"
#include "omp.h"
#include <stdio.h>
// Test app: runs under TWO OMPT tools at once (the first attached via
// OMP_TOOL_LIBRARIES, the second chained via CUSTOM_DATA_STORAGE_TOOL_LIBRARIES)
// and the CHECK lines verify both tools observe the same event sequence with
// independently stored per-thread/task/parallel data.
// NOTE(review): the event order is load-bearing for the CHECK expectations;
// do not restructure the parallel region.
int main() {
// 's' is the handshake flag for OMPT_SIGNAL/OMPT_WAIT (from ompt-signal.h).
// NOTE(review): 'x' is unused — presumably leftover; harmless for the test.
int x, s = 0;
#pragma omp parallel num_threads(2) shared(s)
{
#pragma omp master
{
#pragma omp task shared(s)
{
// Triggers the ompt_event_control_tool callback with command=5, modifier=1
// (checked below for both tools).
omp_control_tool(5, 1, NULL);
OMPT_SIGNAL(s);
}
}
// The worker thread blocks until the task has signalled, forcing the task
// to be scheduled (and its events emitted) before the region ends.
if (omp_get_thread_num() == 1)
OMPT_WAIT(s, 1);
}
return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}0: NULL_POINTER=[[NULL]]
// CHECK: {{^}}0: ompt_event_runtime_shutdown
// CHECK: {{^}}0: ompt_event_runtime_shutdown
// CHECK: {{^}}[[_1ST_MSTR_TID:[0-9]+]]: _first_tool: ompt_event_thread_begin:
// CHECK-SAME: thread_type=ompt_thread_initial=1,
// CHECK-SAME: thread_id=[[_1ST_MSTR_TID]]
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_initial_task_begin:
// CHECK-SAME: parallel_id=[[_FIRST_INIT_PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[_FIRST_INITIAL_TASK_ID:[0-9]+]], actual_parallelism=1,
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_parallel_begin:
// CHECK-SAME: parent_task_id=[[_FIRST_INITIAL_TASK_ID]],
// CHECK-SAME: parent_task_frame.exit=(nil),
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID:[0-9]+]], requested_team_size=2,
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}, invoker
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID]],
// CHECK-SAME: task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID:[0-9]+]], team_size=2,
// CHECK-SAME: thread_num=0
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_masked_begin:
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID]],
// CHECK-SAME: task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}},
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[_FIRST_EXPLICIT_TASK_ID:[0-9]+]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit=4,
// CHECK-SAME: has_dependences=no
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_masked_end:
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID]],
// CHECK-SAME: task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_barrier_begin:
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID]],
// CHECK-SAME: task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_wait_barrier_begin:
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID]],
// CHECK-SAME: task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: second_task_id=[[_FIRST_EXPLICIT_TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_switch=7
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_control_tool:
// CHECK-SAME: command=5, modifier=1, arg=(nil),
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: task level 0:
// CHECK-SAME: task_id=[[_FIRST_EXPLICIT_TASK_ID]]
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: task level 1:
// CHECK-SAME: task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: task level 2:
// CHECK-SAME: task_id=[[_FIRST_INITIAL_TASK_ID]]
// CHECK: {{^}}[[_1ST_MSTR_TID]]:
// CHECK-SAME: _first_tool: parallel level 0: parallel_id=[[_FIRST_PARALLEL_ID]]
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: parallel level 1:
// CHECK-SAME: parallel_id={{[0-9]+}}
// CHECK: {{^}}[[_1ST_MSTR_TID]]:
// CHECK-SAME: _first_tool: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[_FIRST_EXPLICIT_TASK_ID]],
// CHECK-SAME: second_task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_complete=1
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_task_end:
// CHECK-SAME: task_id=[[_FIRST_EXPLICIT_TASK_ID]]
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_wait_barrier_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_barrier_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_implicit_task_end:
// CHECK-SAME: parallel_id=0, task_id=[[_FIRST_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: team_size=2, thread_num=0
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_parallel_end:
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID]],
// CHECK-SAME: task_id=[[_FIRST_INITIAL_TASK_ID]], invoker
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_1ST_MSTR_TID]]: _first_tool: ompt_event_thread_end:
// CHECK-SAME: thread_id=[[_1ST_MSTR_TID]]
// CHECK: {{^}}[[_2ND_MSTR_TID:[0-9]+]]: second_tool: ompt_event_thread_begin:
// CHECK-SAME: thread_type=ompt_thread_initial=1,
// CHECK-SAME: thread_id=[[_2ND_MSTR_TID]]
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_initial_task_begin:
// CHECK-SAME: parallel_id=[[SECOND_INIT_PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[SECOND_INITIAL_TASK_ID:[0-9]+]], actual_parallelism=1,
// CHECK-SAME: index=1, flags=1
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_parallel_begin:
// CHECK-SAME: parent_task_id=[[SECOND_INITIAL_TASK_ID]],
// CHECK-SAME: parent_task_frame.exit=(nil),
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID:[0-9]+]], requested_team_size=2,
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}, invoker
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID]],
// CHECK-SAME: task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID:[0-9]+]], team_size=2,
// CHECK-SAME: thread_num=0
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_masked_begin:
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID]],
// CHECK-SAME: task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}},
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_EXPLICIT_TASK_ID:[0-9]+]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit=4,
// CHECK-SAME: has_dependences=no
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_masked_end:
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID]],
// CHECK-SAME: task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_barrier_begin:
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID]],
// CHECK-SAME: task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_wait_barrier_begin:
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID]],
// CHECK-SAME: task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: second_task_id=[[SECOND_EXPLICIT_TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_switch=7
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_control_tool:
// CHECK-SAME: command=5, modifier=1, arg=(nil),
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: task level 0:
// CHECK-SAME: task_id=[[SECOND_EXPLICIT_TASK_ID]]
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: task level 1:
// CHECK-SAME: task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: task level 2:
// CHECK-SAME: task_id=[[SECOND_INITIAL_TASK_ID]]
// CHECK: {{^}}[[_2ND_MSTR_TID]]:
// CHECK-SAME: second_tool: parallel level 0: parallel_id=[[SECOND_PARALLEL_ID]]
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: parallel level 1:
// CHECK-SAME: parallel_id={{[0-9]+}}
// CHECK: {{^}}[[_2ND_MSTR_TID]]:
// CHECK-SAME: second_tool: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[SECOND_EXPLICIT_TASK_ID]],
// CHECK-SAME: second_task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_complete=1
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_task_end:
// CHECK-SAME: task_id=[[SECOND_EXPLICIT_TASK_ID]]
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_wait_barrier_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_barrier_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_implicit_task_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[SECOND_MASTER_IMPLICIT_TASK_ID]], team_size=2,
// CHECK-SAME: thread_num=0
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_parallel_end:
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID]],
// CHECK-SAME: task_id=[[SECOND_INITIAL_TASK_ID]], invoker
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[_2ND_MSTR_TID]]: second_tool: ompt_event_thread_end:
// CHECK-SAME: thread_id=[[_2ND_MSTR_TID]]
// CHECK: {{^}}[[_1ST_WRKR_TID:[0-9]+]]: _first_tool: ompt_event_thread_begin:
// CHECK-SAME: thread_type=ompt_thread_worker=2,
// CHECK-SAME: thread_id=[[_1ST_WRKR_TID]]
// CHECK: {{^}}[[_1ST_WRKR_TID]]: _first_tool: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID]],
// CHECK-SAME: task_id=[[_FIRST_WORKER_IMPLICIT_TASK_ID:[0-9]+]], team_size=2,
// CHECK-SAME: thread_num=1
// CHECK: {{^}}[[_1ST_WRKR_TID]]: _first_tool: ompt_event_barrier_begin:
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID]],
// CHECK-SAME: task_id=[[_FIRST_WORKER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_1ST_WRKR_TID]]: _first_tool: ompt_event_wait_barrier_begin:
// CHECK-SAME: parallel_id=[[_FIRST_PARALLEL_ID]],
// CHECK-SAME: task_id=[[_FIRST_WORKER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_1ST_WRKR_TID]]: _first_tool: ompt_event_wait_barrier_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[_FIRST_WORKER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_1ST_WRKR_TID]]: _first_tool: ompt_event_barrier_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[_FIRST_WORKER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_1ST_WRKR_TID]]: _first_tool: ompt_event_implicit_task_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[_FIRST_WORKER_IMPLICIT_TASK_ID]], team_size=0,
// thread_num=1
// CHECK: {{^}}[[_1ST_WRKR_TID]]: _first_tool: ompt_event_thread_end:
// CHECK-SAME: thread_id=[[_1ST_WRKR_TID]]
// CHECK: {{^}}[[_2ND_WRKR_TID:[0-9]+]]: second_tool: ompt_event_thread_begin:
// CHECK-SAME: thread_type=ompt_thread_worker=2,
// CHECK-SAME: thread_id=[[_2ND_WRKR_TID]]
// CHECK: {{^}}[[_2ND_WRKR_TID]]: second_tool: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID]],
// CHECK-SAME: task_id=[[SECOND_WORKER_IMPLICIT_TASK_ID:[0-9]+]], team_size=2,
// CHECK-SAME: thread_num=1
// CHECK: {{^}}[[_2ND_WRKR_TID]]: second_tool: ompt_event_barrier_begin:
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID]],
// CHECK-SAME: task_id=[[SECOND_WORKER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_2ND_WRKR_TID]]: second_tool: ompt_event_wait_barrier_begin:
// CHECK-SAME: parallel_id=[[SECOND_PARALLEL_ID]],
// CHECK-SAME: task_id=[[SECOND_WORKER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_2ND_WRKR_TID]]: second_tool: ompt_event_wait_barrier_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[SECOND_WORKER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_2ND_WRKR_TID]]: second_tool: ompt_event_barrier_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[SECOND_WORKER_IMPLICIT_TASK_ID]], codeptr_ra=(nil)
// CHECK: {{^}}[[_2ND_WRKR_TID]]: second_tool: ompt_event_implicit_task_end:
// CHECK-SAME: parallel_id=0,
// CHECK-SAME: task_id=[[SECOND_WORKER_IMPLICIT_TASK_ID]], team_size=0,
// CHECK-SAME: thread_num=1
// CHECK: {{^}}[[_2ND_WRKR_TID]]: second_tool: ompt_event_thread_end:
// CHECK-SAME: thread_id=[[_2ND_WRKR_TID]]
#endif /* APP */
|
libsais16.c | /*--
This file is a part of libsais, a library for linear time
suffix array and burrows wheeler transform construction.
Copyright (c) 2021-2022 Ilya Grebnov <ilya.grebnov@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Please see the file LICENSE for full copyright information.
--*/
#include "libsais16.h"
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#if defined(_OPENMP)
#include <omp.h>
#else
#define UNUSED(_x) (void)(_x)
#endif
typedef int32_t sa_sint_t;
typedef uint32_t sa_uint_t;
typedef ptrdiff_t fast_sint_t;
typedef size_t fast_uint_t;
#define SAINT_BIT (32)
#define SAINT_MAX INT32_MAX
#define SAINT_MIN INT32_MIN
#define ALPHABET_SIZE (1 << CHAR_BIT << CHAR_BIT)
#define UNBWT_FASTBITS (17)
#define SUFFIX_GROUP_BIT (SAINT_BIT - 1)
#define SUFFIX_GROUP_MARKER (((sa_sint_t)1) << (SUFFIX_GROUP_BIT - 1))
#define BUCKETS_INDEX2(_c, _s) (((_c) << 1) + (_s))
#define BUCKETS_INDEX4(_c, _s) (((_c) << 2) + (_s))
#define LIBSAIS_PER_THREAD_CACHE_SIZE (24576)
/* One buffered (symbol, index) pair used by the multithreaded paths to stage
   suffix-array updates per thread; 'symbol' is later used as a position into
   SA when the cache is replayed (see libsais16_place_cached_suffixes).
   NOTE(review): exact field semantics vary by phase — confirm against each
   consumer before relying on them. */
typedef struct LIBSAIS_THREAD_CACHE
{
sa_sint_t symbol;
sa_sint_t index;
} LIBSAIS_THREAD_CACHE;
/* Per-thread scratch state. The union with a 64-byte padding array keeps each
   entry at least 64 bytes — presumably one cache line, to avoid false sharing
   between threads (TODO confirm intent). 'buckets' and 'cache' point into
   shared slabs carved up by libsais16_alloc_thread_state. */
typedef union LIBSAIS_THREAD_STATE
{
struct
{
fast_sint_t position;
fast_sint_t count;
fast_sint_t m;
fast_sint_t last_lms_suffix;
sa_sint_t * buckets;
LIBSAIS_THREAD_CACHE * cache;
} state;
uint8_t padding[64];
} LIBSAIS_THREAD_STATE;
/* Reusable context for suffix-array construction: a shared bucket table,
   optional per-thread state (NULL in single-threaded mode), and the thread
   count it was created for (see libsais16_create_ctx_main). */
typedef struct LIBSAIS_CONTEXT
{
sa_sint_t * buckets;
LIBSAIS_THREAD_STATE * thread_state;
fast_sint_t threads;
} LIBSAIS_CONTEXT;
/* Reusable context for the inverse Burrows-Wheeler transform: bucket tables
   plus a 'fastbits' lookup (sized by UNBWT_FASTBITS — presumably a shortcut
   table for locating buckets; confirm against the unbwt implementation). */
typedef struct LIBSAIS_UNBWT_CONTEXT
{
sa_uint_t * bucket2;
uint16_t * fastbits;
sa_uint_t * buckets;
fast_sint_t threads;
} LIBSAIS_UNBWT_CONTEXT;
#if defined(__GNUC__) || defined(__clang__)
#define RESTRICT __restrict__
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
#define RESTRICT __restrict
#else
#error Your compiler, configuration or platform is not supported.
#endif
#if defined(__has_builtin)
#if __has_builtin(__builtin_prefetch)
#define HAS_BUILTIN_PREFECTCH
#endif
#elif defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 2)) || (__GNUC__ >= 4)
#define HAS_BUILTIN_PREFECTCH
#endif
#if defined(HAS_BUILTIN_PREFECTCH)
#define libsais16_prefetch(address) __builtin_prefetch((const void *)(address), 0, 0)
#define libsais16_prefetchw(address) __builtin_prefetch((const void *)(address), 1, 0)
#elif defined (_M_IX86) || defined (_M_AMD64)
#include <intrin.h>
#define libsais16_prefetch(address) _mm_prefetch((const void *)(address), _MM_HINT_NTA)
#define libsais16_prefetchw(address) _m_prefetchw((const void *)(address))
#elif defined (_M_ARM)
#include <intrin.h>
#define libsais16_prefetch(address) __prefetch((const void *)(address))
#define libsais16_prefetchw(address) __prefetchw((const void *)(address))
#elif defined (_M_ARM64)
#include <intrin.h>
#define libsais16_prefetch(address) __prefetch2((const void *)(address), 1)
#define libsais16_prefetchw(address) __prefetch2((const void *)(address), 17)
#else
#error Your compiler, configuration or platform is not supported.
#endif
#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
#if defined(_LITTLE_ENDIAN) \
|| (defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && BYTE_ORDER == LITTLE_ENDIAN) \
|| (defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && _BYTE_ORDER == _LITTLE_ENDIAN) \
|| (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN) \
|| (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define __LITTLE_ENDIAN__
#elif defined(_BIG_ENDIAN) \
|| (defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN) \
|| (defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && _BYTE_ORDER == _BIG_ENDIAN) \
|| (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && __BYTE_ORDER == __BIG_ENDIAN) \
|| (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#define __BIG_ENDIAN__
#elif defined(_WIN32)
#define __LITTLE_ENDIAN__
#endif
#endif
/* Rounds 'address' up to the next multiple of 'alignment' (which must be a
   power of two); returns 'address' unchanged when it is already aligned. */
static void * libsais16_align_up(const void * address, size_t alignment)
{
    ptrdiff_t value = (ptrdiff_t)address;
    ptrdiff_t mask = (ptrdiff_t)alignment - 1;

    return (void *)((value + mask) & ~mask);
}
/* Allocates 'size' bytes aligned to 'alignment' (a power of two). The
   distance back to the raw malloc pointer is stashed in a short immediately
   before the returned address so libsais16_free_aligned can recover it.
   Returns NULL on allocation failure. */
static void * libsais16_alloc_aligned(size_t size, size_t alignment)
{
    void * base = malloc(size + sizeof(short) + alignment - 1);
    void * aligned;

    if (base == NULL)
    {
        return NULL;
    }

    /* Leave room for the header short, then round up to the alignment. */
    aligned = libsais16_align_up((void *)((ptrdiff_t)base + (ptrdiff_t)(sizeof(short))), alignment);
    ((short *)aligned)[-1] = (short)((ptrdiff_t)aligned - (ptrdiff_t)base);

    return aligned;
}
/* Releases memory obtained from libsais16_alloc_aligned by stepping back over
   the offset recorded just before the aligned address. Safe to call on NULL. */
static void libsais16_free_aligned(void * aligned_address)
{
    if (aligned_address == NULL)
    {
        return;
    }

    free((char *)aligned_address - ((short *)aligned_address)[-1]);
}
/* Allocates one LIBSAIS_THREAD_STATE per thread, giving each thread its own
   bucket table (4 * ALPHABET_SIZE counters) and suffix cache carved out of
   two shared page-aligned slabs. Returns NULL if any of the three
   allocations fails; everything already allocated is released first. */
static LIBSAIS_THREAD_STATE * libsais16_alloc_thread_state(sa_sint_t threads)
{
LIBSAIS_THREAD_STATE * RESTRICT thread_state = (LIBSAIS_THREAD_STATE *)libsais16_alloc_aligned((size_t)threads * sizeof(LIBSAIS_THREAD_STATE), 4096);
sa_sint_t * RESTRICT thread_buckets = (sa_sint_t *)libsais16_alloc_aligned((size_t)threads * 4 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096);
LIBSAIS_THREAD_CACHE * RESTRICT thread_cache = (LIBSAIS_THREAD_CACHE *)libsais16_alloc_aligned((size_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE * sizeof(LIBSAIS_THREAD_CACHE), 4096);
if (thread_state != NULL && thread_buckets != NULL && thread_cache != NULL)
{
fast_sint_t t;
for (t = 0; t < threads; ++t)
{
/* Hand each thread a disjoint slice of the two slabs; entry 0 keeps the
   slab base pointers, which is what libsais16_free_thread_state frees. */
thread_state[t].state.buckets = thread_buckets; thread_buckets += 4 * ALPHABET_SIZE;
thread_state[t].state.cache = thread_cache; thread_cache += LIBSAIS_PER_THREAD_CACHE_SIZE;
}
return thread_state;
}
/* Partial failure: the slab pointers are still at their bases here, and
   libsais16_free_aligned is a no-op on NULL, so this is safe for any mix. */
libsais16_free_aligned(thread_cache);
libsais16_free_aligned(thread_buckets);
libsais16_free_aligned(thread_state);
return NULL;
}
/* Releases a thread-state array created by libsais16_alloc_thread_state.
   The cache and bucket slabs are shared across entries, so only the base
   pointers stored in entry 0 are freed. Safe to call on NULL. */
static void libsais16_free_thread_state(LIBSAIS_THREAD_STATE * thread_state)
{
    if (thread_state == NULL)
    {
        return;
    }

    libsais16_free_aligned(thread_state[0].state.cache);
    libsais16_free_aligned(thread_state[0].state.buckets);
    libsais16_free_aligned(thread_state);
}
// Builds the top-level libsais16 context: a context record, an 8*ALPHABET_SIZE
// bucket array, and (only when threads > 1) the per-thread state. Returns
// NULL with nothing leaked on any allocation failure.
static LIBSAIS_CONTEXT * libsais16_create_ctx_main(sa_sint_t threads)
{
    LIBSAIS_CONTEXT * RESTRICT ctx = (LIBSAIS_CONTEXT *)libsais16_alloc_aligned(sizeof(LIBSAIS_CONTEXT), 64);
    sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais16_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096);
    LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais16_alloc_thread_state(threads) : NULL;

    // thread_state is mandatory only in the multi-threaded configuration.
    if (ctx == NULL || buckets == NULL || (threads != 1 && thread_state == NULL))
    {
        libsais16_free_thread_state(thread_state);
        libsais16_free_aligned(buckets);
        libsais16_free_aligned(ctx);
        return NULL;
    }

    ctx->buckets = buckets;
    ctx->threads = threads;
    ctx->thread_state = thread_state;
    return ctx;
}
// Tears down a context built by libsais16_create_ctx_main. NULL is a no-op;
// the helpers below each tolerate NULL members themselves.
static void libsais16_free_ctx_main(LIBSAIS_CONTEXT * ctx)
{
    if (ctx == NULL)
    {
        return;
    }
    libsais16_free_thread_state(ctx->thread_state);
    libsais16_free_aligned(ctx->buckets);
    libsais16_free_aligned(ctx);
}
#if defined(_OPENMP)
// Counts entries of SA[omp_block_start .. omp_block_start + omp_block_size)
// that carry a negative mark.
static sa_sint_t libsais16_count_negative_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    sa_sint_t total = 0;
    fast_sint_t p;
    for (p = omp_block_start; p < omp_block_start + omp_block_size; ++p)
    {
        if (SA[p] < 0) { total += 1; }
    }
    return total;
}
// Counts entries of SA[omp_block_start .. omp_block_start + omp_block_size)
// that are exactly zero.
static sa_sint_t libsais16_count_zero_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    sa_sint_t total = 0;
    fast_sint_t p;
    for (p = omp_block_start; p < omp_block_start + omp_block_size; ++p)
    {
        if (SA[p] == 0) { total += 1; }
    }
    return total;
}
// Scatters cached (symbol, index) pairs into SA: SA[cache[i].symbol] =
// cache[i].index for every entry in [omp_block_start, omp_block_start +
// omp_block_size). The main loop is 4x unrolled and software-prefetches both
// the upcoming cache entries and the SA slots they will write.
static void libsais16_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
// The unrolled loop stops prefetch_distance + 3 entries early so the
// 4-wide reads and the prefetch lookahead stay inside the block.
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais16_prefetch(&cache[i + 2 * prefetch_distance]);
libsais16_prefetchw(&SA[cache[i + prefetch_distance + 0].symbol]);
libsais16_prefetchw(&SA[cache[i + prefetch_distance + 1].symbol]);
libsais16_prefetchw(&SA[cache[i + prefetch_distance + 2].symbol]);
libsais16_prefetchw(&SA[cache[i + prefetch_distance + 3].symbol]);
SA[cache[i + 0].symbol] = cache[i + 0].index;
SA[cache[i + 1].symbol] = cache[i + 1].index;
SA[cache[i + 2].symbol] = cache[i + 2].index;
SA[cache[i + 3].symbol] = cache[i + 3].index;
}
// Scalar tail for the last (up to prefetch_distance + 3) entries.
for (j += prefetch_distance + 3; i < j; i += 1)
{
SA[cache[i].symbol] = cache[i].index;
}
}
// Compacts the cache in place, keeping only entries with a non-negative
// symbol (the branch-free "l += cache[l].symbol >= 0" advances the write
// cursor only for kept entries), then scatters the surviving entries into
// SA via libsais16_place_cached_suffixes.
static void libsais16_compact_and_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j, l;
// l is the write cursor of the compaction; i reads 4 entries per pass.
for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4)
{
libsais16_prefetchw(&cache[i + prefetch_distance]);
cache[l] = cache[i + 0]; l += cache[l].symbol >= 0;
cache[l] = cache[i + 1]; l += cache[l].symbol >= 0;
cache[l] = cache[i + 2]; l += cache[l].symbol >= 0;
cache[l] = cache[i + 3]; l += cache[l].symbol >= 0;
}
// Scalar tail for the last up-to-3 entries.
for (j += 3; i < j; i += 1)
{
cache[l] = cache[i]; l += cache[l].symbol >= 0;
}
libsais16_place_cached_suffixes(SA, cache, omp_block_start, l - omp_block_start);
}
// Element-wise adds the bucket one stride below into bucket00.
static void libsais16_accumulate_counts_s32_2(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    fast_sint_t s;
    for (s = 0; s < bucket_size; s += 1)
    {
        bucket00[s] = bucket00[s] + bucket00[s - bucket_stride];
    }
}
// Element-wise adds the 2 buckets below bucket00 into it.
static void libsais16_accumulate_counts_s32_3(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    fast_sint_t s;
    for (s = 0; s < bucket_size; s += 1)
    {
        sa_sint_t sum = bucket00[s];
        fast_sint_t b;
        for (b = 1; b <= 2; b += 1) { sum += bucket00[s - b * bucket_stride]; }
        bucket00[s] = sum;
    }
}
// Element-wise adds the 3 buckets below bucket00 into it.
static void libsais16_accumulate_counts_s32_4(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    fast_sint_t s;
    for (s = 0; s < bucket_size; s += 1)
    {
        sa_sint_t sum = bucket00[s];
        fast_sint_t b;
        for (b = 1; b <= 3; b += 1) { sum += bucket00[s - b * bucket_stride]; }
        bucket00[s] = sum;
    }
}
// Element-wise adds the 4 buckets below bucket00 into it.
static void libsais16_accumulate_counts_s32_5(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    fast_sint_t s;
    for (s = 0; s < bucket_size; s += 1)
    {
        sa_sint_t sum = bucket00[s];
        fast_sint_t b;
        for (b = 1; b <= 4; b += 1) { sum += bucket00[s - b * bucket_stride]; }
        bucket00[s] = sum;
    }
}
// Element-wise adds the 5 buckets below bucket00 into it.
static void libsais16_accumulate_counts_s32_6(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    fast_sint_t s;
    for (s = 0; s < bucket_size; s += 1)
    {
        sa_sint_t sum = bucket00[s];
        fast_sint_t b;
        for (b = 1; b <= 5; b += 1) { sum += bucket00[s - b * bucket_stride]; }
        bucket00[s] = sum;
    }
}
// Element-wise adds the 6 buckets below bucket00 into it.
static void libsais16_accumulate_counts_s32_7(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    fast_sint_t s;
    for (s = 0; s < bucket_size; s += 1)
    {
        sa_sint_t sum = bucket00[s];
        fast_sint_t b;
        for (b = 1; b <= 6; b += 1) { sum += bucket00[s - b * bucket_stride]; }
        bucket00[s] = sum;
    }
}
// Element-wise adds the 7 buckets below bucket00 into it.
static void libsais16_accumulate_counts_s32_8(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    fast_sint_t s;
    for (s = 0; s < bucket_size; s += 1)
    {
        sa_sint_t sum = bucket00[s];
        fast_sint_t b;
        for (b = 1; b <= 7; b += 1) { sum += bucket00[s - b * bucket_stride]; }
        bucket00[s] = sum;
    }
}
// Element-wise adds the 8 buckets below bucket00 into it.
static void libsais16_accumulate_counts_s32_9(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    fast_sint_t s;
    for (s = 0; s < bucket_size; s += 1)
    {
        sa_sint_t sum = bucket00[s];
        fast_sint_t b;
        for (b = 1; b <= 8; b += 1) { sum += bucket00[s - b * bucket_stride]; }
        bucket00[s] = sum;
    }
}
// Accumulates "num_buckets" stacked count arrays (each bucket_stride apart,
// topmost one ending at "buckets") into the topmost array, element-wise.
static void libsais16_accumulate_counts_s32(sa_sint_t * RESTRICT buckets, fast_sint_t bucket_size, fast_sint_t bucket_stride, fast_sint_t num_buckets)
{
    // Fold 9 buckets at a time from the bottom of the stack upwards; each
    // pass leaves its partial sum in place and shrinks the stack by 8.
    for (; num_buckets >= 9; num_buckets -= 8)
    {
        libsais16_accumulate_counts_s32_9(buckets - (num_buckets - 9) * bucket_stride, bucket_size, bucket_stride);
    }

    // Finish the remaining 2..8 buckets with the matching fixed-width
    // kernel; a single remaining bucket needs no work.
    if (num_buckets == 2) { libsais16_accumulate_counts_s32_2(buckets, bucket_size, bucket_stride); }
    else if (num_buckets == 3) { libsais16_accumulate_counts_s32_3(buckets, bucket_size, bucket_stride); }
    else if (num_buckets == 4) { libsais16_accumulate_counts_s32_4(buckets, bucket_size, bucket_stride); }
    else if (num_buckets == 5) { libsais16_accumulate_counts_s32_5(buckets, bucket_size, bucket_stride); }
    else if (num_buckets == 6) { libsais16_accumulate_counts_s32_6(buckets, bucket_size, bucket_stride); }
    else if (num_buckets == 7) { libsais16_accumulate_counts_s32_7(buckets, bucket_size, bucket_stride); }
    else if (num_buckets == 8) { libsais16_accumulate_counts_s32_8(buckets, bucket_size, bucket_stride); }
}
#endif
// Scans T[omp_block_start .. omp_block_start + omp_block_size) right to left,
// classifying suffixes as S-/L-type, and writes the text position of every
// LMS suffix into SA, filling downwards from index m. "s" is a sliding 2-bit
// window of type bits (bit 0 = current suffix is S-type); the pattern
// (s & 3) == 1 marks an LMS position and commits the speculative SA[m] store
// by decrementing m.
static void libsais16_gather_lms_suffixes_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, fast_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
if (omp_block_size > 0)
{
const fast_sint_t prefetch_distance = 128;
fast_sint_t i, j = omp_block_start + omp_block_size, c0 = T[omp_block_start + omp_block_size - 1], c1 = -1;
// Resolve the type of the block's last suffix by skipping the run of
// equal symbols that may extend past the block boundary.
while (j < n && (c1 = T[j]) == c0) { ++j; }
fast_uint_t s = c0 >= c1;
// Main loop, 4x unrolled; c0/c1 alternate roles to avoid copies.
for (i = omp_block_start + omp_block_size - 2, j = omp_block_start + 3; i >= j; i -= 4)
{
libsais16_prefetch(&T[i - prefetch_distance]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1);
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1);
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1);
}
// Scalar tail.
for (j -= 3; i >= j; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
}
// Final speculative store for the block's first position.
SA[m] = (sa_sint_t)(i + 1);
}
}
// Parallel driver for libsais16_gather_lms_suffixes_16u: splits T into
// 16-aligned per-thread blocks, then each thread gathers its block's LMS
// suffixes into the correct tail region of SA using the per-thread LMS
// counts (thread_state[t].state.m) produced by a previous counting pass.
static void libsais16_gather_lms_suffixes_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Equal 16-multiple blocks; the last thread absorbs the remainder.
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
libsais16_gather_lms_suffixes_16u(T, SA, n, (fast_sint_t)n - 1, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
// m = total LMS count of all blocks to the right of this thread's block,
// which fixes this thread's starting write position in SA's tail.
fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t > omp_thread_num; --t) { m += thread_state[t].state.m; }
libsais16_gather_lms_suffixes_16u(T, SA, n, (fast_sint_t)n - 1 - m, omp_block_start, omp_block_size);
#pragma omp barrier
// Re-store this block's last LMS suffix after the barrier: the gather's
// final speculative store may have been overwritten by the neighbor.
if (thread_state[omp_thread_num].state.m > 0)
{
SA[(fast_sint_t)n - 1 - m] = (sa_sint_t)thread_state[omp_thread_num].state.last_lms_suffix;
}
}
#endif
}
}
// Single-threaded right-to-left scan over a 32-bit text T[0..n) that writes
// the position of every LMS suffix into the tail of SA (downwards from
// n - 1) and returns how many were found. "s" is the 2-bit type window;
// (s & 3) == 1 commits the speculative SA[m] store.
static sa_sint_t libsais16_gather_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t i = n - 2;
sa_sint_t m = n - 1;
// The last suffix T[n-1..] is S-type by convention (s starts at 1).
fast_uint_t s = 1;
fast_sint_t c0 = T[n - 1];
fast_sint_t c1 = 0;
// Main loop, 4x unrolled with c0/c1 alternating roles.
for (; i >= 3; i -= 4)
{
libsais16_prefetch(&T[i - prefetch_distance]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) == 1);
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((s & 3) == 1);
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((s & 3) == 1);
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((s & 3) == 1);
}
// Scalar tail down to position 0.
for (; i >= 0; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) == 1);
}
// Number of LMS suffixes gathered.
return n - 1 - m;
}
// Variant of libsais16_gather_lms_suffixes_32s for a text whose symbols may
// carry a negative mark. The commit condition compares the 2-bit type window
// against the symbol's sign: for an unmarked (non-negative) symbol it commits
// on (s & 3) == 1 as usual.
// NOTE(review): for a marked (negative) symbol the condition becomes
// (s & 3) == 0 — verify the intended handling against the compaction
// protocol used by the callers.
static sa_sint_t libsais16_gather_compacted_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t i = n - 2;
sa_sint_t m = n - 1;
fast_uint_t s = 1;
fast_sint_t c0 = T[n - 1];
fast_sint_t c1 = 0;
// 4x unrolled main loop; the commit test uses the symbol PRECEDING the
// stored position (c0/c1 from the previous step).
for (; i >= 3; i -= 4)
{
libsais16_prefetch(&T[i - prefetch_distance]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
}
// Scalar tail.
for (; i >= 0; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
}
// Number of (compacted) LMS suffixes gathered.
return n - 1 - m;
}
#if defined(_OPENMP)
// Builds a 4-way histogram over a 32-bit text with alphabet size k: for each
// position, buckets[BUCKETS_INDEX4(symbol, type)] is incremented, where
// "type" is the low 2 bits of the sliding S/L type window. No SA output —
// counting only.
static void libsais16_count_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
const fast_sint_t prefetch_distance = 32;
memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t));
sa_sint_t i = n - 2;
// The final suffix is S-type by convention.
fast_uint_t s = 1;
fast_sint_t c0 = T[n - 1];
fast_sint_t c1 = 0;
// 4x unrolled main loop; bucket slots are prefetched for write one
// prefetch_distance ahead.
for (; i >= prefetch_distance + 3; i -= 4)
{
libsais16_prefetch(&T[i - 2 * prefetch_distance]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
}
// Scalar tail.
for (; i >= 0; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
}
// Count T[0]'s suffix; its window is shifted without a predecessor bit.
buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]++;
}
#endif
// Builds a 2-way histogram over a 32-bit text with alphabet size k: for each
// position, buckets[BUCKETS_INDEX2(symbol, is_lms)] is incremented, where
// is_lms is the (s & 3) == 1 test on the sliding S/L type window. Counting
// only — no SA output.
static void libsais16_count_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
const fast_sint_t prefetch_distance = 32;
memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));
sa_sint_t i = n - 2;
// The final suffix is S-type by convention.
fast_uint_t s = 1;
fast_sint_t c0 = T[n - 1];
fast_sint_t c1 = 0;
// 4x unrolled main loop with write-prefetch of upcoming bucket slots.
for (; i >= prefetch_distance + 3; i -= 4)
{
libsais16_prefetch(&T[i - 2 * prefetch_distance]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
}
// Scalar tail.
for (; i >= 0; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
}
// T[0]'s suffix is never LMS (nothing precedes it) — count in slot 0.
buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++;
}
#if defined(_OPENMP)
// Like libsais16_count_lms_suffixes_32s_2k, but for a text whose symbols may
// carry a sign-bit mark: each symbol is masked with SAINT_MAX before it is
// used as a bucket index, so marked and unmarked occurrences count into the
// same slot. The type-window update itself still sees the signed value.
static void libsais16_count_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
const fast_sint_t prefetch_distance = 32;
memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));
sa_sint_t i = n - 2;
// The final suffix is S-type by convention.
fast_uint_t s = 1;
fast_sint_t c0 = T[n - 1];
fast_sint_t c1 = 0;
// 4x unrolled main loop; prefetch indices are masked the same way.
for (; i >= prefetch_distance + 3; i -= 4)
{
libsais16_prefetch(&T[i - 2 * prefetch_distance]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
}
// Scalar tail.
for (; i >= 0; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
}
// T[0]'s suffix is never LMS — count its (unmasked-sign) symbol in slot 0.
c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++;
}
#endif
// Fused counting + gathering pass over a 16-bit text block: builds the 4-way
// (symbol, type) histogram in "buckets" while simultaneously writing LMS
// suffix positions into SA, filling downwards from the block's last index.
// Returns the number of LMS suffixes found in the block.
static sa_sint_t libsais16_count_and_gather_lms_suffixes_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t m = omp_block_start + omp_block_size - 1;
if (omp_block_size > 0)
{
const fast_sint_t prefetch_distance = 128;
fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1;
// Resolve the type of the block's last suffix across the boundary run
// of equal symbols.
while (j < n && (c1 = T[j]) == c0) { ++j; }
fast_uint_t s = c0 >= c1;
// 4x unrolled main loop: speculative SA[m] store committed by the
// LMS test (s & 3) == 1, plus a histogram increment per position.
for (i = m - 1, j = omp_block_start + 3; i >= j; i -= 4)
{
libsais16_prefetch(&T[i - prefetch_distance]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
}
// Scalar tail.
for (j -= 3; i >= j; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
}
// Handle the block's first position; a block starting at 0 uses the
// sentinel symbol -1 as its (virtual) predecessor.
c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
}
return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}
// Parallel driver for libsais16_count_and_gather_lms_suffixes_16u. Each
// thread counts+gathers its own 16-aligned block into per-thread buckets,
// then a single master thread merges the per-thread LMS runs into SA's tail
// and accumulates the histograms into "buckets". Returns the total LMS count.
static sa_sint_t libsais16_count_and_gather_lms_suffixes_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Equal 16-multiple blocks; the last thread absorbs the remainder.
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
m = libsais16_count_and_gather_lms_suffixes_16u(T, SA, n, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Each thread works into its private bucket array; the gathered LMS
// run ends at state.position and its last entry is saved because the
// later memcpy-based merge may overwrite it.
thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
thread_state[omp_thread_num].state.m = libsais16_count_and_gather_lms_suffixes_16u(T, SA, n, thread_state[omp_thread_num].state.buckets, omp_block_start, omp_block_size);
if (thread_state[omp_thread_num].state.m > 0)
{
thread_state[omp_thread_num].state.last_lms_suffix = SA[thread_state[omp_thread_num].state.position - 1];
}
}
#pragma omp barrier
#pragma omp master
{
// Merge: move each thread's LMS run (right to left) to SA's tail, and
// turn the per-thread bucket counts into prefix offsets while summing
// them into the global histogram (temp_bucket receives the prefix).
memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
m += (sa_sint_t)thread_state[t].state.m;
if (t != omp_num_threads - 1 && thread_state[t].state.m > 0)
{
memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.m], (size_t)thread_state[t].state.m * sizeof(sa_sint_t));
}
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t s; for (s = 0; s < 4 * ALPHABET_SIZE; s += 1) { sa_sint_t A = buckets[s], B = temp_bucket[s]; buckets[s] = A + B; temp_bucket[s] = A; }
}
}
}
}
#endif
}
return m;
}
// 32-bit-text counterpart of libsais16_count_and_gather_lms_suffixes_16u:
// fused 4-way (symbol, type) histogram over alphabet size k plus gathering
// of LMS suffix positions into SA (downwards from the block's last index).
// Returns the number of LMS suffixes found in the block.
static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t));
fast_sint_t m = omp_block_start + omp_block_size - 1;
if (omp_block_size > 0)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1;
// Resolve the type of the block's last suffix across the boundary run.
while (j < n && (c1 = T[j]) == c0) { ++j; }
fast_uint_t s = c0 >= c1;
// 4x unrolled main loop with write-prefetch of upcoming bucket slots.
for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
{
libsais16_prefetch(&T[i - 2 * prefetch_distance]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
}
// Scalar tail.
for (j -= prefetch_distance + 3; i >= j; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
}
// Block's first position; a block starting at 0 uses sentinel -1.
c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
}
return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}
// Like libsais16_count_and_gather_lms_suffixes_32s_4k but with a 2-way
// histogram: buckets[BUCKETS_INDEX2(symbol, is_lms)], where is_lms is the
// (s & 3) == 1 test. Gathers LMS positions into SA and returns their count.
static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));
fast_sint_t m = omp_block_start + omp_block_size - 1;
if (omp_block_size > 0)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1;
// Resolve the type of the block's last suffix across the boundary run.
while (j < n && (c1 = T[j]) == c0) { ++j; }
fast_uint_t s = c0 >= c1;
// 4x unrolled main loop with write-prefetch of upcoming bucket slots.
for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
{
libsais16_prefetch(&T[i - 2 * prefetch_distance]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
}
// Scalar tail.
for (j -= prefetch_distance + 3; i >= j; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
}
// Block's first position; a block starting at 0 uses sentinel -1.
c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
}
return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}
// Compacted variant of libsais16_count_and_gather_lms_suffixes_32s_2k for a
// text whose symbols may carry a sign-bit mark: symbols are masked with
// SAINT_MAX before bucketing, and the gather-commit condition compares the
// 2-bit type window against the sign of the preceding symbol (see the
// non-counting compacted gather for the same pattern). Returns the number of
// gathered LMS suffixes.
static sa_sint_t libsais16_count_and_gather_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));
fast_sint_t m = omp_block_start + omp_block_size - 1;
if (omp_block_size > 0)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1;
// Resolve the type of the block's last suffix across the boundary run.
while (j < n && (c1 = T[j]) == c0) { ++j; }
fast_uint_t s = c0 >= c1;
// 4x unrolled main loop; prefetch indices masked like the bucket indices.
for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
{
libsais16_prefetch(&T[i - 2 * prefetch_distance]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]);
libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]);
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
}
// Scalar tail.
for (j -= prefetch_distance + 3; i >= j; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
}
// Block's first position; a block starting at 0 uses sentinel -1.
c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
}
return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}
#if defined(_OPENMP)
// Chooses the spacing between per-thread bucket arrays. Prefers padding each
// bucket up to a multiple of 1024 elements, then 16, when the available
// free_space can hold num_buckets - 1 extra padded copies; otherwise packs
// them tightly at exactly bucket_size.
static fast_sint_t libsais16_get_bucket_stride(fast_sint_t free_space, fast_sint_t bucket_size, fast_sint_t num_buckets)
{
    fast_sint_t padded_1024 = (bucket_size + 1023) & (-1024);
    if (free_space / (num_buckets - 1) >= padded_1024)
    {
        return padded_1024;
    }

    fast_sint_t padded_16 = (bucket_size + 15) & (-16);
    if (free_space / (num_buckets - 1) >= padded_16)
    {
        return padded_16;
    }

    return bucket_size;
}
// Parallel "free space" driver for libsais16_count_and_gather_lms_suffixes_32s_4k:
// per-thread 4k-histograms are laid out bucket_stride apart below "buckets"
// (inside otherwise unused SA space), each thread counts+gathers its own
// block, then the last thread merges the gathered LMS runs into SA's tail
// while the remaining threads cooperatively accumulate the histograms into
// the top array. Returns the total LMS count.
static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_4k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Equal 16-multiple blocks; the last thread absorbs the remainder.
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
m = libsais16_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
fast_sint_t bucket_size = 4 * (fast_sint_t)k;
// Stride between stacked per-thread bucket arrays, sized from the gap
// between the end of SA and "buckets".
fast_sint_t bucket_stride = libsais16_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads);
{
// Thread t counts into the array t strides below "buckets".
thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
thread_state[omp_thread_num].state.count = libsais16_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size);
}
#pragma omp barrier
if (omp_thread_num == omp_num_threads - 1)
{
// Last thread: concatenate the per-thread LMS runs into SA's tail
// (right to left; the last thread's own run is already in place).
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
m += (sa_sint_t)thread_state[t].state.count;
if (t != omp_num_threads - 1 && thread_state[t].state.count > 0)
{
memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
}
}
else
{
// Other threads: split the bucket array into slices and sum all
// omp_num_threads stacked histograms into the top one.
omp_num_threads = omp_num_threads - 1;
omp_block_stride = (bucket_size / omp_num_threads) & (-16);
omp_block_start = omp_thread_num * omp_block_stride;
omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start;
libsais16_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1);
}
}
#endif
}
return m;
}
// 2k-histogram twin of libsais16_count_and_gather_lms_suffixes_32s_4k_fs_omp:
// identical orchestration (per-thread stacked histograms below "buckets",
// last thread merges gathered LMS runs, the rest accumulate the counts), but
// each histogram is 2*k wide. Returns the total LMS count.
static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Equal 16-multiple blocks; the last thread absorbs the remainder.
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
m = libsais16_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
fast_sint_t bucket_size = 2 * (fast_sint_t)k;
// Stride between stacked per-thread bucket arrays, sized from the gap
// between the end of SA and "buckets".
fast_sint_t bucket_stride = libsais16_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads);
{
// Thread t counts into the array t strides below "buckets".
thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
thread_state[omp_thread_num].state.count = libsais16_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size);
}
#pragma omp barrier
if (omp_thread_num == omp_num_threads - 1)
{
// Last thread: concatenate the per-thread LMS runs into SA's tail.
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
m += (sa_sint_t)thread_state[t].state.count;
if (t != omp_num_threads - 1 && thread_state[t].state.count > 0)
{
memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
}
}
else
{
// Other threads: cooperatively sum the stacked histograms into the
// top one, slice by slice.
omp_num_threads = omp_num_threads - 1;
omp_block_stride = (bucket_size / omp_num_threads) & (-16);
omp_block_start = omp_thread_num * omp_block_stride;
omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start;
libsais16_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1);
}
}
#endif
}
return m;
}
/*
 * Compacted variant of the 2k free-space counter/gatherer.  In the parallel
 * path LMS suffixes are gathered into the scratch half SA[n .. 2n), then
 * every thread copies its own run into final position at the tail of
 * SA[0 .. n).  No count is returned (NOTE(review): callers appear to obtain
 * the LMS count elsewhere - confirm).
 */
static void libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* split T into per-thread chunks whose sizes are multiples of 16 */
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
libsais16_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
fast_sint_t bucket_size = 2 * (fast_sint_t)k;
/* free space starts after the 2n scratch area, hence &SA[n + n] */
fast_sint_t bucket_stride = libsais16_get_bucket_stride(buckets - &SA[n + n], bucket_size, omp_num_threads);
{
/* gather into the SA + n scratch half, count into own bucket copy */
thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
thread_state[omp_thread_num].state.count = libsais16_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA + n, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size);
}
#pragma omp barrier
{
/* every thread computes the tail offset of its own run (sum of counts
   of threads with index >= its own) and copies that run into place */
fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t >= omp_thread_num; --t) { m += (sa_sint_t)thread_state[t].state.count; }
if (thread_state[omp_thread_num].state.count > 0)
{
memcpy(&SA[n - m], &SA[n + thread_state[omp_thread_num].state.position - thread_state[omp_thread_num].state.count], (size_t)thread_state[omp_thread_num].state.count * sizeof(sa_sint_t));
}
}
{
/* all threads then accumulate the per-thread bucket copies */
omp_block_stride = (bucket_size / omp_num_threads) & (-16);
omp_block_start = omp_thread_num * omp_block_stride;
omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start;
libsais16_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads);
}
}
#endif
}
}
#endif
/*
 * No-free-space ("nofs") variant for the 4k bucket table: when there is no
 * room for per-thread bucket copies, use at most two threads - thread 0
 * counts (symbol, type) occurrences into buckets while thread 1 gathers the
 * LMS suffix positions into SA.  Returns the number of LMS suffixes m
 * (written only by the gathering thread; thread 0 leaves m == 0 untouched).
 */
static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_4k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads);
fast_sint_t omp_num_threads = 1;
#endif
if (omp_num_threads == 1)
{
/* serial path does counting and gathering in one pass */
m = libsais16_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, 0, n);
}
#if defined(_OPENMP)
else if (omp_thread_num == 0)
{
libsais16_count_lms_suffixes_32s_4k(T, n, k, buckets);
}
else
{
m = libsais16_gather_lms_suffixes_32s(T, SA, n);
}
#endif
}
return m;
}
/*
 * No-free-space variant for the 2k bucket table: same two-thread split as
 * the 4k version - thread 0 counts into buckets, thread 1 gathers LMS
 * suffix positions into SA.  Returns the number of LMS suffixes m.
 */
static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads);
fast_sint_t omp_num_threads = 1;
#endif
if (omp_num_threads == 1)
{
m = libsais16_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n);
}
#if defined(_OPENMP)
else if (omp_thread_num == 0)
{
libsais16_count_lms_suffixes_32s_2k(T, n, k, buckets);
}
else
{
m = libsais16_gather_lms_suffixes_32s(T, SA, n);
}
#endif
}
return m;
}
/*
 * No-free-space variant of the compacted 2k counter/gatherer: at most two
 * threads - thread 0 counts the compacted LMS symbol pairs into buckets,
 * thread 1 gathers the compacted LMS suffix positions into SA.  Returns
 * the number of gathered LMS suffixes m.
 */
static sa_sint_t libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads);
fast_sint_t omp_num_threads = 1;
#endif
if (omp_num_threads == 1)
{
m = libsais16_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n);
}
#if defined(_OPENMP)
else if (omp_thread_num == 0)
{
libsais16_count_compacted_lms_suffixes_32s_2k(T, n, k, buckets);
}
else
{
m = libsais16_gather_compacted_lms_suffixes_32s(T, SA, n);
}
#endif
}
return m;
}
/*
 * Dispatcher for counting/gathering LMS suffixes with a 4k bucket table.
 * Uses the parallel free-space variant when the gap between SA[n] and
 * buckets can hold 16-aligned per-thread copies of the 4k table and the
 * input is large enough; otherwise falls back to the (at most two-thread)
 * no-free-space variant.  Returns the number of LMS suffixes.
 */
static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t m;
#if defined(_OPENMP)
/* how many 16-aligned 4k bucket copies fit in the free space, capped by threads */
sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((4 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; }
if (max_threads > 1 && n >= 65536 && n / k >= 2)
{
/* also cap by work per thread so each thread has enough symbols per bucket */
if (max_threads > n / 16 / k) { max_threads = n / 16 / k; }
m = libsais16_count_and_gather_lms_suffixes_32s_4k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state);
}
else
#else
UNUSED(thread_state);
#endif
{
m = libsais16_count_and_gather_lms_suffixes_32s_4k_nofs_omp(T, SA, n, k, buckets, threads);
}
return m;
}
/*
 * Dispatcher for counting/gathering LMS suffixes with a 2k bucket table:
 * chooses the parallel free-space variant when per-thread 2k bucket copies
 * fit between SA[n] and buckets, otherwise the no-free-space variant.
 * Returns the number of LMS suffixes.
 */
static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t m;
#if defined(_OPENMP)
/* how many 16-aligned 2k bucket copies fit in the free space, capped by threads */
sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; }
if (max_threads > 1 && n >= 65536 && n / k >= 2)
{
if (max_threads > n / 8 / k) { max_threads = n / 8 / k; }
m = libsais16_count_and_gather_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state);
}
else
#else
UNUSED(thread_state);
#endif
{
m = libsais16_count_and_gather_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads);
}
return m;
}
/*
 * Dispatcher for the compacted 2k counter/gatherer.  The free-space check
 * uses &SA[n + n] because the compacted variant also needs the scratch half
 * SA[n .. 2n).  No count is returned.
 */
static void libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n + n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; }
if (max_threads > 1 && n >= 65536 && n / k >= 2)
{
if (max_threads > n / 8 / k) { max_threads = n / 8 / k; }
libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state);
}
else
#else
UNUSED(thread_state);
#endif
{
libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads);
}
}
/*
 * Builds a symbol histogram of T[0 .. n - 1] into buckets[0 .. k - 1],
 * zeroing the table first.  The main loop handles eight symbols per
 * iteration with a software prefetch ahead of the read position.
 */
static void libsais16_count_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
const fast_sint_t prefetch_distance = 32;
memset(buckets, 0, (size_t)k * sizeof(sa_sint_t));
fast_sint_t pos = 0;
fast_sint_t unrolled_limit = (fast_sint_t)n - 7;
while (pos < unrolled_limit)
{
libsais16_prefetch(&T[pos + prefetch_distance]);
buckets[T[pos + 0]]++;
buckets[T[pos + 1]]++;
buckets[T[pos + 2]]++;
buckets[T[pos + 3]]++;
buckets[T[pos + 4]]++;
buckets[T[pos + 5]]++;
buckets[T[pos + 6]]++;
buckets[T[pos + 7]]++;
pos += 8;
}
/* tail: fewer than eight symbols remain */
for (; pos < (fast_sint_t)n; pos += 1)
{
buckets[T[pos]]++;
}
}
/*
 * Derives per-symbol bucket [start, end) offsets from the 4-way
 * (symbol, type) counters stored at the front of buckets, writing the
 * results into the 7th and 8th ALPHABET_SIZE-sized slices.  When freq is
 * non-NULL the per-symbol totals are also reported through it.
 */
static void libsais16_initialize_buckets_start_and_end_16u(sa_sint_t * RESTRICT buckets, sa_sint_t * RESTRICT freq)
{
sa_sint_t * RESTRICT starts = &buckets[6 * ALPHABET_SIZE];
sa_sint_t * RESTRICT ends = &buckets[7 * ALPHABET_SIZE];
fast_sint_t src, dst; sa_sint_t total = 0;
if (freq != NULL)
{
for (src = BUCKETS_INDEX4(0, 0), dst = 0; src <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); src += BUCKETS_INDEX4(1, 0), dst += 1)
{
sa_sint_t symbol_count = buckets[src + BUCKETS_INDEX4(0, 0)] + buckets[src + BUCKETS_INDEX4(0, 1)] + buckets[src + BUCKETS_INDEX4(0, 2)] + buckets[src + BUCKETS_INDEX4(0, 3)];
freq[dst] = symbol_count;
starts[dst] = total;
total += symbol_count;
ends[dst] = total;
}
}
else
{
for (src = BUCKETS_INDEX4(0, 0), dst = 0; src <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); src += BUCKETS_INDEX4(1, 0), dst += 1)
{
starts[dst] = total;
total += buckets[src + BUCKETS_INDEX4(0, 0)] + buckets[src + BUCKETS_INDEX4(0, 1)] + buckets[src + BUCKETS_INDEX4(0, 2)] + buckets[src + BUCKETS_INDEX4(0, 3)];
ends[dst] = total;
}
}
}
/*
 * Derives per-symbol bucket [start, end) offsets from the 4-way counters
 * in the first 4k entries of buckets; results go into slices [4k, 5k) and
 * [5k, 6k) of the 6k-entry layout.
 */
static void libsais16_initialize_buckets_start_and_end_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
sa_sint_t * RESTRICT starts = &buckets[4 * k];
sa_sint_t * RESTRICT ends = &buckets[5 * k];
fast_sint_t src, dst; sa_sint_t total = 0;
for (src = BUCKETS_INDEX4(0, 0), dst = 0; src <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); src += BUCKETS_INDEX4(1, 0), dst += 1)
{
starts[dst] = total;
sa_sint_t symbol_count = buckets[src + BUCKETS_INDEX4(0, 0)] + buckets[src + BUCKETS_INDEX4(0, 1)] + buckets[src + BUCKETS_INDEX4(0, 2)] + buckets[src + BUCKETS_INDEX4(0, 3)];
total += symbol_count;
ends[dst] = total;
}
}
/*
 * Derives per-symbol bucket [start, end) offsets from the 2-way counters
 * in the first 2k entries of buckets; results go into slices [2k, 3k) and
 * [3k, 4k) of the 4k-entry layout.
 */
static void libsais16_initialize_buckets_start_and_end_32s_4k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
sa_sint_t * RESTRICT starts = &buckets[2 * k];
sa_sint_t * RESTRICT ends = &buckets[3 * k];
fast_sint_t src, dst; sa_sint_t total = 0;
for (src = BUCKETS_INDEX2(0, 0), dst = 0; src <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); src += BUCKETS_INDEX2(1, 0), dst += 1)
{
starts[dst] = total;
total += buckets[src + BUCKETS_INDEX2(0, 0)] + buckets[src + BUCKETS_INDEX2(0, 1)];
ends[dst] = total;
}
}
/*
 * Converts the 2-way (symbol, type) counters into per-symbol bucket end
 * offsets, stored in-place in slot 0 of each pair.
 */
static void libsais16_initialize_buckets_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
fast_sint_t idx; sa_sint_t running_end = 0;
for (idx = BUCKETS_INDEX2(0, 0); idx <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); idx += BUCKETS_INDEX2(1, 0))
{
running_end += buckets[idx + BUCKETS_INDEX2(0, 0)] + buckets[idx + BUCKETS_INDEX2(0, 1)];
buckets[idx + BUCKETS_INDEX2(0, 0)] = running_end;
}
}
/*
 * Compacts slot 0 of each (symbol, type) pair down into buckets[0 .. k - 1]
 * (the bucket start offsets), then builds the end-offset table at
 * buckets[k .. 2k - 1] as the start table shifted right by one with a
 * leading zero.
 */
static void libsais16_initialize_buckets_start_and_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
fast_sint_t src, dst;
for (dst = 0, src = BUCKETS_INDEX2(0, 0); src <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); dst += 1, src += BUCKETS_INDEX2(1, 0))
{
buckets[dst] = buckets[src];
}
buckets[k] = 0; memcpy(&buckets[k + 1], buckets, ((size_t)k - 1) * sizeof(sa_sint_t));
}
/*
 * In-place exclusive prefix sum: buckets[i] becomes the sum of all counts
 * before symbol i (i.e. each bucket's start offset).
 */
static void libsais16_initialize_buckets_start_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
fast_sint_t idx; sa_sint_t running = 0;
for (idx = 0; idx < (fast_sint_t)k; idx += 1)
{
sa_sint_t count = buckets[idx];
buckets[idx] = running;
running += count;
}
}
/*
 * In-place inclusive prefix sum: buckets[i] becomes the sum of counts up to
 * and including symbol i (i.e. each bucket's end offset).
 */
static void libsais16_initialize_buckets_end_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
fast_sint_t idx; sa_sint_t running = 0;
for (idx = 0; idx < (fast_sint_t)k; idx += 1)
{
running += buckets[idx];
buckets[idx] = running;
}
}
/*
 * Prepares the temporary bucket table used to radix sort the LMS suffixes
 * of a 16-bit string.  First walks T right-to-left from first_lms_suffix,
 * rebuilding the L/S type bits in s and decrementing the matching
 * (symbol, type) counters so positions at or left of the first LMS suffix
 * are excluded.  Then builds a 2-entry-per-symbol temp table at
 * buckets[4 * ALPHABET_SIZE] and returns the running total of the type-1
 * and type-3 counters (NOTE(review): presumably the number of S-type
 * entries feeding the LMS radix sort - confirm against BUCKETS_INDEX4
 * layout).
 */
static sa_sint_t libsais16_initialize_buckets_for_lms_suffixes_radix_sort_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix)
{
{
fast_uint_t s = 0;
fast_sint_t c0 = T[first_lms_suffix];
fast_sint_t c1 = 0;
for (; --first_lms_suffix >= 0; )
{
/* shift in one type bit: compares T[i] against T[i + 1], with ties
   resolved by the previous bit (s & 1) */
c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--;
}
buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--;
}
{
sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE];
fast_sint_t i, j; sa_sint_t sum = 0;
for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0))
{
/* slot 1 gets the offset before this symbol, slot 0 the offset after */
temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum; sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum;
}
return sum;
}
}
/*
 * Prepares the 2k bucket table for the LMS suffix radix sort of a 32-bit
 * string: moves the first LMS suffix's count from the type-1 to the type-0
 * counter of its symbol, then converts the pairs in-place into two running
 * sums - slot 0 becomes the prefix sum over both counters (bucket ends),
 * slot 1 the prefix sum over the type-1 counters only.
 */
static void libsais16_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix)
{
buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++;
buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--;
fast_sint_t i; sa_sint_t sum0 = 0, sum1 = 0;
for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0))
{
sum0 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)];
sum1 += buckets[i + BUCKETS_INDEX2(0, 1)];
buckets[i + BUCKETS_INDEX2(0, 0)] = sum0;
buckets[i + BUCKETS_INDEX2(0, 1)] = sum1;
}
}
/*
 * 6k-layout counterpart of the 16u initializer: walks T right-to-left from
 * first_lms_suffix, rebuilding the L/S type bits in s and decrementing the
 * matching (symbol, type) counters, then builds a one-entry-per-symbol
 * temp table at buckets[4 * k] from the type-1 and type-3 counters and
 * returns the resulting total (NOTE(review): presumably the S-type entry
 * count used by the subsequent radix sort - confirm).
 */
static sa_sint_t libsais16_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix)
{
{
fast_uint_t s = 0;
fast_sint_t c0 = T[first_lms_suffix];
fast_sint_t c1 = 0;
for (; --first_lms_suffix >= 0; )
{
/* shift in one type bit per position, ties resolved by the previous bit */
c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--;
}
buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--;
}
{
sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k];
fast_sint_t i, j; sa_sint_t sum = 0;
for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1)
{
sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum;
}
return sum;
}
}
/*
 * Prepares the 4k bucket layout for combined radix and partial sorting:
 * reclassifies the first LMS suffix's count (type 1 -> type 0), then in one
 * pass fills bucket_start/bucket_end ([2k, 3k) and [3k, 4k)) with prefix
 * sums over both counters, while slot 1 of each pair is rewritten with the
 * prefix sum over the type-1 counters only.
 */
static void libsais16_initialize_buckets_for_radix_and_partial_sorting_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix)
{
sa_sint_t * RESTRICT bucket_start = &buckets[2 * k];
sa_sint_t * RESTRICT bucket_end = &buckets[3 * k];
buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++;
buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--;
fast_sint_t i, j; sa_sint_t sum0 = 0, sum1 = 0;
for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1)
{
bucket_start[j] = sum1;
sum0 += buckets[i + BUCKETS_INDEX2(0, 1)];
sum1 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)];
buckets[i + BUCKETS_INDEX2(0, 1)] = sum0;
bucket_end[j] = sum1;
}
}
/*
 * Radix-scatters pre-gathered LMS suffix positions into their buckets by
 * first symbol: reads SA[omp_block_start .. omp_block_start + size - 1]
 * right-to-left and stores each position at the pre-decremented end offset
 * of its symbol's bucket.  Main loop is unrolled four ways with software
 * prefetch of both SA and the indexed symbols of T.
 */
static void libsais16_radix_sort_lms_suffixes_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
{
libsais16_prefetch(&SA[i - 2 * prefetch_distance]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 0]]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 1]]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 2]]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 3]]);
sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0;
sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1;
sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2;
sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3;
}
/* tail: remaining positions one at a time */
for (j -= prefetch_distance + 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p;
}
}
/*
 * Parallel driver for the 16-bit LMS radix scatter.  The serial path sorts
 * all m - 1 gathered LMS suffixes (those sitting in the tail of SA) in one
 * call.  The parallel path gives each thread private bucket offsets derived
 * from the shared table, then lets each thread scatter its own run of
 * gathered suffixes independently.
 */
static void libsais16_radix_sort_lms_suffixes_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && m >= 65536 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_num_threads = 1;
#endif
if (omp_num_threads == 1)
{
libsais16_radix_sort_lms_suffixes_16u(T, SA, &buckets[4 * ALPHABET_SIZE], (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1);
}
#if defined(_OPENMP)
else
{
{
/* derive this thread's private bucket offsets: shared offset minus the
   thread's own per-symbol count (NOTE(review): assumes the per-thread
   state buckets hold counts at the BUCKETS_INDEX4(c, 1) positions -
   confirm against the counting pass that filled thread_state) */
sa_sint_t * RESTRICT src_bucket = &buckets[4 * ALPHABET_SIZE];
sa_sint_t * RESTRICT dst_bucket = thread_state[omp_thread_num].state.buckets;
fast_sint_t i, j;
for (i = BUCKETS_INDEX2(0, 0), j = BUCKETS_INDEX4(0, 1); i <= BUCKETS_INDEX2(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX2(1, 0), j += BUCKETS_INDEX4(1, 0))
{
dst_bucket[i] = src_bucket[i] - dst_bucket[j];
}
}
{
/* locate this thread's run of gathered LMS suffixes at the tail of SA
   (runs are stacked back-to-front by thread index) and scatter it */
fast_sint_t t, omp_block_start = 0, omp_block_size = thread_state[omp_thread_num].state.m;
for (t = omp_num_threads - 1; t >= omp_thread_num; --t) omp_block_start += thread_state[t].state.m;
if (omp_block_start == (fast_sint_t)m && omp_block_size > 0)
{
/* the very first LMS suffix is excluded from the sort */
omp_block_start -= 1; omp_block_size -= 1;
}
libsais16_radix_sort_lms_suffixes_16u(T, SA, thread_state[omp_thread_num].state.buckets, (fast_sint_t)n - omp_block_start, omp_block_size);
}
}
#endif
}
}
/*
 * 6k-layout LMS radix scatter for 32-bit strings: buckets are indexed by
 * the full first symbol T[p] (one counter per symbol).  Reads the gathered
 * positions right-to-left and stores each at the pre-decremented end offset
 * of its bucket; four-way unrolled with a two-stage prefetch (symbols, then
 * bucket slots).
 */
static void libsais16_radix_sort_lms_suffixes_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4)
{
libsais16_prefetch(&SA[i - 3 * prefetch_distance]);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]);
libsais16_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 0]]]);
libsais16_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 1]]]);
libsais16_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 2]]]);
libsais16_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 3]]]);
sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[T[p0]]] = p0;
sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[T[p1]]] = p1;
sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[T[p2]]] = p2;
sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[T[p3]]] = p3;
}
/* tail: remaining positions one at a time */
for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[--induction_bucket[T[p]]] = p;
}
}
/*
 * 2k-layout LMS radix scatter for 32-bit strings: identical structure to
 * the 6k variant, but buckets are addressed through BUCKETS_INDEX2(c, 0)
 * (slot 0 of each symbol's pair holds the end offset).
 */
static void libsais16_radix_sort_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4)
{
libsais16_prefetch(&SA[i - 3 * prefetch_distance]);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]);
libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 0]], 0)]);
libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 1]], 0)]);
libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 2]], 0)]);
libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 3]], 0)]);
sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0;
sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1;
sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2;
sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3;
}
/* tail: remaining positions one at a time */
for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p;
}
}
#if defined(_OPENMP)
/*
 * Phase 1 of the blocked parallel radix sort: copies each SA entry of the
 * thread's block into the shared cache, pairing the position (index) with
 * its first symbol (symbol) so the later sort phase never touches T.
 */
static void libsais16_radix_sort_lms_suffixes_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais16_prefetch(&SA[i + 2 * prefetch_distance]);
libsais16_prefetch(&T[SA[i + prefetch_distance + 0]]);
libsais16_prefetch(&T[SA[i + prefetch_distance + 1]]);
libsais16_prefetch(&T[SA[i + prefetch_distance + 2]]);
libsais16_prefetch(&T[SA[i + prefetch_distance + 3]]);
libsais16_prefetchw(&cache[i + prefetch_distance]);
cache[i + 0].symbol = T[cache[i + 0].index = SA[i + 0]];
cache[i + 1].symbol = T[cache[i + 1].index = SA[i + 1]];
cache[i + 2].symbol = T[cache[i + 2].index = SA[i + 2]];
cache[i + 3].symbol = T[cache[i + 3].index = SA[i + 3]];
}
/* tail: remaining entries one at a time */
for (j += prefetch_distance + 3; i < j; i += 1)
{
cache[i].symbol = T[cache[i].index = SA[i]];
}
}
/*
 * Phase 2 of the blocked parallel radix sort (6k layout): executed serially
 * by the master thread over the whole block.  Replaces each cached entry's
 * symbol with its destination index in SA, obtained by pre-decrementing the
 * symbol's bucket counter right-to-left (preserving the stable order).
 */
static void libsais16_radix_sort_lms_suffixes_32s_6k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
{
libsais16_prefetchw(&cache[i - 2 * prefetch_distance]);
libsais16_prefetchw(&induction_bucket[cache[i - prefetch_distance - 0].symbol]);
libsais16_prefetchw(&induction_bucket[cache[i - prefetch_distance - 1].symbol]);
libsais16_prefetchw(&induction_bucket[cache[i - prefetch_distance - 2].symbol]);
libsais16_prefetchw(&induction_bucket[cache[i - prefetch_distance - 3].symbol]);
cache[i - 0].symbol = --induction_bucket[cache[i - 0].symbol];
cache[i - 1].symbol = --induction_bucket[cache[i - 1].symbol];
cache[i - 2].symbol = --induction_bucket[cache[i - 2].symbol];
cache[i - 3].symbol = --induction_bucket[cache[i - 3].symbol];
}
/* tail: remaining entries one at a time */
for (j -= prefetch_distance + 3; i >= j; i -= 1)
{
cache[i].symbol = --induction_bucket[cache[i].symbol];
}
}
/*
 * Phase 2 of the blocked parallel radix sort (2k layout): same as the 6k
 * variant but bucket counters live in slot 0 of each symbol's pair
 * (BUCKETS_INDEX2(c, 0)).  Runs serially on the master thread.
 */
static void libsais16_radix_sort_lms_suffixes_32s_2k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
{
libsais16_prefetchw(&cache[i - 2 * prefetch_distance]);
libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 0].symbol, 0)]);
libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 1].symbol, 0)]);
libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 2].symbol, 0)]);
libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 3].symbol, 0)]);
cache[i - 0].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 0].symbol, 0)];
cache[i - 1].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 1].symbol, 0)];
cache[i - 2].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 2].symbol, 0)];
cache[i - 3].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 3].symbol, 0)];
}
/* tail: remaining entries one at a time */
for (j -= prefetch_distance + 3; i >= j; i -= 1)
{
cache[i].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i].symbol, 0)];
}
}
/*
 * Blocked parallel radix sort of one block of LMS suffixes (6k layout),
 * in three barrier-separated phases: all threads gather (position, symbol)
 * pairs into the shared cache; the master thread serially assigns
 * destination indices from the bucket counters; all threads then place the
 * cached suffixes at their destinations.  Falls back to the direct serial
 * scatter when only one thread runs.
 */
static void libsais16_radix_sort_lms_suffixes_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* split the block into 16-aligned per-thread chunks */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais16_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
/* phase 1: gather; cache is indexed relative to block_start */
libsais16_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
/* phase 2: serial destination assignment over the whole block */
libsais16_radix_sort_lms_suffixes_32s_6k_block_sort(induction_bucket, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
/* phase 3: parallel placement of cached suffixes */
libsais16_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
/*
 * Blocked parallel radix sort of one block of LMS suffixes (2k layout);
 * same three-phase gather / serial sort / place structure as the 6k block
 * variant, using the 2k bucket sort in phase 2.
 */
static void libsais16_radix_sort_lms_suffixes_32s_2k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* split the block into 16-aligned per-thread chunks */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais16_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
/* phase 1: gather (position, symbol) pairs into the shared cache */
libsais16_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
/* phase 2: serial destination assignment over the whole block */
libsais16_radix_sort_lms_suffixes_32s_2k_block_sort(induction_bucket, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
/* phase 3: parallel placement of cached suffixes */
libsais16_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
/*
 * Top-level driver for the 6k LMS radix sort: serial for small inputs,
 * otherwise processes the m - 1 gathered suffixes (stored at the tail of
 * SA) in cache-sized blocks of threads * LIBSAIS_PER_THREAD_CACHE_SIZE
 * entries, each sorted by the blocked parallel routine.
 */
static void libsais16_radix_sort_lms_suffixes_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || m < 65536)
{
libsais16_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; }
/* blocks are taken from the tail of SA, hence n - block_end */
libsais16_radix_sort_lms_suffixes_32s_6k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
/*
 * Top-level driver for the 2k LMS radix sort; identical blocking scheme to
 * the 6k driver, delegating each block to the 2k blocked parallel routine.
 */
static void libsais16_radix_sort_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || m < 65536)
{
libsais16_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; }
/* blocks are taken from the tail of SA, hence n - block_end */
libsais16_radix_sort_lms_suffixes_32s_2k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
/*
 * Single-pass LMS detection and radix placement for the 1k bucket layout:
 * walks T right-to-left while maintaining the last two suffix-type bits in
 * s ((s & 3) == 1 identifies the position after an S-to-L transition, i.e.
 * an LMS suffix), placing each LMS position at the pre-decremented end
 * offset of its symbol's bucket.  Returns the number m of LMS suffixes;
 * when m > 1 the slot of the last-placed (leftmost) LMS suffix is
 * overwritten with 0 (NOTE(review): presumably a sentinel for the
 * subsequent induction phase - confirm against the caller).
 */
static sa_sint_t libsais16_radix_sort_lms_suffixes_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t i = n - 2;
sa_sint_t m = 0;
/* s starts at 1: the final suffix T[n - 1 ..] is treated as L-type */
fast_uint_t s = 1;
fast_sint_t c0 = T[n - 1];
fast_sint_t c1 = 0;
fast_sint_t c2 = 0;
for (; i >= prefetch_distance + 3; i -= 4)
{
libsais16_prefetch(&T[i - 2 * prefetch_distance]);
libsais16_prefetchw(&buckets[T[i - prefetch_distance - 0]]);
libsais16_prefetchw(&buckets[T[i - prefetch_distance - 1]]);
libsais16_prefetchw(&buckets[T[i - prefetch_distance - 2]]);
libsais16_prefetchw(&buckets[T[i - prefetch_distance - 3]]);
/* each step shifts in one type bit (ties resolved by the previous bit)
   and, on an S-to-L transition, places the preceding LMS position */
c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i + 1; m++; }
c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 0; m++; }
c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i - 1; m++; }
c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 2; m++; }
}
for (; i >= 0; i -= 1)
{
c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i + 1; m++; }
}
if (m > 1)
{
SA[buckets[c2]] = 0;
}
return m;
}
/*
 * Sets the SAINT_MIN flag bit (sign bit) on the SA entry located at each
 * bucket boundary recorded in induction_bucket[omp_block_start ..
 * omp_block_start + omp_block_size - 1] (NOTE(review): presumably marks
 * the first entry of each suffix group for the induction phase - confirm).
 * Four-way unrolled with prefetch of both the offsets and the SA targets.
 */
static void libsais16_radix_sort_set_markers_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais16_prefetch(&induction_bucket[i + 2 * prefetch_distance]);
libsais16_prefetchw(&SA[induction_bucket[i + prefetch_distance + 0]]);
libsais16_prefetchw(&SA[induction_bucket[i + prefetch_distance + 1]]);
libsais16_prefetchw(&SA[induction_bucket[i + prefetch_distance + 2]]);
libsais16_prefetchw(&SA[induction_bucket[i + prefetch_distance + 3]]);
SA[induction_bucket[i + 0]] |= SAINT_MIN;
SA[induction_bucket[i + 1]] |= SAINT_MIN;
SA[induction_bucket[i + 2]] |= SAINT_MIN;
SA[induction_bucket[i + 3]] |= SAINT_MIN;
}
/* tail: remaining buckets one at a time */
for (j += prefetch_distance + 3; i < j; i += 1)
{
SA[induction_bucket[i]] |= SAINT_MIN;
}
}
/*
 * 4k-layout counterpart of the 6k marker pass: bucket offsets are read from
 * slot 0 of each symbol's pair (BUCKETS_INDEX2(i, 0)) and the flag ORed
 * into SA is SUFFIX_GROUP_MARKER instead of the sign bit.
 */
static void libsais16_radix_sort_set_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais16_prefetch(&induction_bucket[BUCKETS_INDEX2(i + 2 * prefetch_distance, 0)]);
libsais16_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 0, 0)]]);
libsais16_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 1, 0)]]);
libsais16_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 2, 0)]]);
libsais16_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 3, 0)]]);
SA[induction_bucket[BUCKETS_INDEX2(i + 0, 0)]] |= SUFFIX_GROUP_MARKER;
SA[induction_bucket[BUCKETS_INDEX2(i + 1, 0)]] |= SUFFIX_GROUP_MARKER;
SA[induction_bucket[BUCKETS_INDEX2(i + 2, 0)]] |= SUFFIX_GROUP_MARKER;
SA[induction_bucket[BUCKETS_INDEX2(i + 3, 0)]] |= SUFFIX_GROUP_MARKER;
}
/* tail: remaining buckets one at a time */
for (j += prefetch_distance + 3; i < j; i += 1)
{
SA[induction_bucket[BUCKETS_INDEX2(i, 0)]] |= SUFFIX_GROUP_MARKER;
}
}
/* Parallel driver for libsais16_radix_sort_set_markers_32s_6k.  Splits the
   k - 1 bucket slots into per-thread chunks; runs serially when OpenMP is
   not compiled in, when threads == 1, or when k < 65536. */
static void libsais16_radix_sort_set_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
/* Per-thread chunk, rounded down to a multiple of 16; the last thread
   takes whatever remains of the k - 1 slots. */
fast_sint_t omp_block_stride = (((fast_sint_t)k - 1) / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : (fast_sint_t)k - 1 - omp_block_start;
#else
UNUSED(threads);
/* Serial fallback: one chunk covering all k - 1 slots. */
fast_sint_t omp_block_start = 0;
fast_sint_t omp_block_size = (fast_sint_t)k - 1;
#endif
libsais16_radix_sort_set_markers_32s_6k(SA, induction_bucket, omp_block_start, omp_block_size);
}
}
/* Parallel driver for libsais16_radix_sort_set_markers_32s_4k; identical
   thread-partitioning scheme to the 6k driver above (serial when OpenMP is
   absent, threads == 1, or k < 65536). */
static void libsais16_radix_sort_set_markers_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
/* Per-thread chunk, rounded down to a multiple of 16; last thread takes
   the remainder. */
fast_sint_t omp_block_stride = (((fast_sint_t)k - 1) / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : (fast_sint_t)k - 1 - omp_block_start;
#else
UNUSED(threads);
fast_sint_t omp_block_start = 0;
fast_sint_t omp_block_size = (fast_sint_t)k - 1;
#endif
libsais16_radix_sort_set_markers_32s_4k(SA, induction_bucket, omp_block_start, omp_block_size);
}
}
/* Converts per-symbol 4-way counts (BUCKETS_INDEX4 layout) into running
   start offsets used by the partial-sorting scans over a 16-bit alphabet.
   Writes 2-way offsets (BUCKETS_INDEX2 layout) into both buckets[] and the
   temp area at buckets[4 * ALPHABET_SIZE].
   NOTE(review): slot 1 of T[first_lms_suffix] is incremented first,
   presumably to account for that suffix before the prefix sums are taken —
   confirm against the libsais induction scheme. */
static void libsais16_initialize_buckets_for_partial_sorting_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count)
{
sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE];
buckets[BUCKETS_INDEX4((fast_uint_t)T[first_lms_suffix], 1)]++;
/* sum0 starts past the left-suffix region (+1); sum1 accumulates slot-1
   counts. */
fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0;
for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0))
{
/* temp gets the offset BEFORE this symbol's counts are added; buckets
   gets it AFTER (i.e. exclusive vs. inclusive prefix sums). */
temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0;
sum0 += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 2)];
sum1 += buckets[i + BUCKETS_INDEX4(0, 1)];
buckets[j + BUCKETS_INDEX2(0, 0)] = sum0;
buckets[j + BUCKETS_INDEX2(0, 1)] = sum1;
}
}
/* 32-bit-alphabet (k symbols) variant of the bucket initialization for
   partial sorting.  Reads the four per-symbol counts (slots 0..3 of the
   BUCKETS_INDEX4 layout), rewrites slots 0/1 in place with running offsets
   (sum0/sum2), zeroes slots 2/3, and stores a second pair of running
   offsets (sum0/sum1) into the temp area at buckets[4 * k].  The loop is
   split at the symbol T[first_lms_suffix]: after that point sum1 carries an
   extra +1.  NOTE(review): the +1 presumably accounts for the first LMS
   suffix itself — confirm against the libsais induction scheme. */
static void libsais16_initialize_buckets_for_partial_sorting_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count)
{
sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k];
fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0, sum2 = 0;
/* First phase: symbols strictly below T[first_lms_suffix]. */
for (first_lms_suffix = T[first_lms_suffix], i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4((fast_sint_t)first_lms_suffix - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0))
{
sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)];
sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)];
sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)];
sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)];
/* Replace counts with offsets; clear slots 2/3 for later reuse. */
buckets[i + BUCKETS_INDEX4(0, 0)] = sum0;
buckets[i + BUCKETS_INDEX4(0, 1)] = sum2;
buckets[i + BUCKETS_INDEX4(0, 2)] = 0;
buckets[i + BUCKETS_INDEX4(0, 3)] = 0;
sum0 += SS + SL; sum1 += LS; sum2 += LS + LL;
temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0;
temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1;
}
/* Second phase: remaining symbols, with sum1 bumped by one. */
for (sum1 += 1; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0))
{
sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)];
sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)];
sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)];
sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)];
buckets[i + BUCKETS_INDEX4(0, 0)] = sum0;
buckets[i + BUCKETS_INDEX4(0, 1)] = sum2;
buckets[i + BUCKETS_INDEX4(0, 2)] = 0;
buckets[i + BUCKETS_INDEX4(0, 3)] = 0;
sum0 += SS + SL; sum1 += LS; sum2 += LS + LL;
temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0;
temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1;
}
}
/* Left-to-right induction scan over SA[omp_block_start ..
   omp_block_start+omp_block_size) for a 16-bit alphabet.  Each entry p
   (with its sign bit masked off) induces position p - 1 into the bucket
   selected by (T[p-1], T[p-2] >= T[p-1]).  The counter d is incremented
   whenever the stored entry had its sign bit set, and the induced entry's
   sign bit is set whenever distinct_names[v] differs from the current d.
   Returns the updated d.  The main loop is unrolled by two with prefetching
   of upcoming SA entries and the text around them. */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetch(&SA[i + 2 * prefetch_distance]);
libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2);
libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2);
sa_sint_t p0 = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]);
SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d;
sa_sint_t p1 = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]);
SA[induction_bucket[v1]++] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d;
}
/* Scalar tail for the last prefetch_distance + 1 entries. */
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]);
SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
}
return d;
}
#if defined(_OPENMP)
/* First phase of the multi-threaded left-to-right scan: walks its SA slice
   and records each entry's index and induced bucket symbol into the
   per-thread cache, while only COUNTING bucket occupancy and tracking
   distinct names in the thread-local buckets (the actual SA writes happen
   later in ..._block_place, after the master thread merges the counts).
   Stores d - 1 and the cache entry count into state[0]. */
static void libsais16_partial_sorting_scan_left_to_right_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state)
{
const fast_sint_t prefetch_distance = 32;
/* Thread-local buckets start at offset 0 here (unlike the shared layout,
   where induction buckets live at 4 * ALPHABET_SIZE). */
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0; sa_sint_t d = 1;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetch(&SA[i + 2 * prefetch_distance]);
libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2);
libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2);
sa_sint_t p0 = cache[count].index = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d;
sa_sint_t p1 = cache[count].index = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); induction_bucket[v]++; distinct_names[v] = d;
}
/* Hand back this slice's distinct-name delta and cached entry count. */
state[0].state.position = (fast_sint_t)d - 1;
state[0].state.count = count;
}
/* Second phase of the multi-threaded left-to-right scan: replays the
   cached (index, symbol) pairs produced by ..._block_prepare and performs
   the actual SA writes, using the merged per-thread bucket offsets and the
   adjusted distinct-name counter d supplied by the master thread. */
static void libsais16_partial_sorting_scan_left_to_right_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t i, j;
for (i = 0, j = count - 1; i < j; i += 2)
{
libsais16_prefetch(&cache[i + prefetch_distance]);
sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol;
SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d;
sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol;
SA[induction_bucket[v1]++] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d;
}
/* Odd trailing entry, if any. */
for (j += 1; i < j; i += 1)
{
sa_sint_t p = cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol;
SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
}
}
/* Parallel left-to-right scan of one SA block.  Single-threaded path calls
   the serial scan directly; the multi-threaded path runs three barrier-
   separated phases: (1) every thread runs block_prepare on its slice into
   thread-local buckets/cache, (2) the master thread merges per-thread
   bucket counts into running global offsets and rebases each thread's
   distinct-name counter, (3) every thread runs block_place to write its
   cached entries into SA.  Returns the updated distinct-name counter d. */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Slice the block in multiples of 16; last thread takes the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais16_partial_sorting_scan_left_to_right_16u(T, SA, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
/* Phase 1: per-thread gather/count into thread-local state. */
{
libsais16_partial_sorting_scan_left_to_right_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]);
}
#pragma omp barrier
/* Phase 2 (master only): turn per-thread counts into global offsets
   and rebase each thread's distinct-name counter. */
#pragma omp master
{
sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t)
{
sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE];
fast_sint_t c;
/* Exclusive prefix sum: thread t starts where threads < t end. */
for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A + B; temp_induction_bucket[c] = A; }
for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ? D : A; temp_distinct_names[c] = A; }
d += 1 + (sa_sint_t)thread_state[t].state.position; thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position;
}
}
#pragma omp barrier
/* Phase 3: per-thread placement using the merged offsets. */
{
libsais16_partial_sorting_scan_left_to_right_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position);
}
}
#endif
}
return d;
}
#endif
/* Top-level left-to-right partial-sorting scan for a 16-bit alphabet.
   Seeds the induction with the last suffix (n - 1), then scans the first
   left_suffixes_count SA entries either serially or, with OpenMP, in
   chunks delimited by zero entries in SA (a zero means the inducing entry
   for that position has not been written yet, so the scan must not run
   ahead of it).  Returns the updated distinct-name counter d. */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
/* Seed: place suffix n - 1 with its sign bit set and open a new name. */
SA[induction_bucket[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN;
distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])] = ++d;
if (threads == 1 || left_suffixes_count < 65536)
{
d = libsais16_partial_sorting_scan_left_to_right_16u(T, SA, buckets, d, 0, left_suffixes_count);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = 0; block_start < left_suffixes_count; )
{
if (SA[block_start] == 0)
{
/* Not induced yet; cannot start a parallel block here. */
block_start++;
}
else
{
/* Grow the block up to the per-thread cache capacity or the next
   zero entry, whichever comes first. */
fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > left_suffixes_count) { block_max_end = left_suffixes_count;}
fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; }
fast_sint_t block_size = block_end - block_start;
if (block_size < 32)
{
/* Too small to parallelize; process inline. */
for (; block_start < block_end; block_start += 1)
{
sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]);
SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
}
}
else
{
d = libsais16_partial_sorting_scan_left_to_right_16u_block_omp(T, SA, buckets, d, block_start, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
return d;
}
/* Left-to-right induction scan, 32-bit alphabet, 6k-buckets layout.  Like
   the 16u variant, but the bucket pointer and the distinct-name slot for a
   symbol pair v live at buckets[v] and buckets[2 + v] within the
   BUCKETS_INDEX4 stride.  Uses a deeper (2x) prefetch pipeline: text is
   prefetched two distances ahead, bucket slots one distance ahead.
   Returns the updated distinct-name counter d. */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetch(&SA[i + 3 * prefetch_distance]);
libsais16_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 2);
libsais16_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 2);
/* Prefetch the bucket slots one distance ahead (p > 0 guards index 0). */
sa_sint_t p0 = SA[i + prefetch_distance + 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais16_prefetchw(&buckets[v0]);
sa_sint_t p1 = SA[i + prefetch_distance + 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais16_prefetchw(&buckets[v1]);
sa_sint_t p2 = SA[i + 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] >= T[p2 - 1]);
SA[buckets[v2]++] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d;
sa_sint_t p3 = SA[i + 1]; d += (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] >= T[p3 - 1]);
SA[buckets[v3]++] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d;
}
for (j += 2 * prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]);
SA[buckets[v]++] = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
}
return d;
}
/* Left-to-right induction scan, 32-bit alphabet, 4k-buckets layout.
   Entries use two marker bits: the sign bit and SUFFIX_GROUP_MARKER.  Only
   positive entries induce; each processed slot is first rewritten with its
   sign bit cleared, then zeroed once consumed.  d advances by the entry's
   suffix-group bit (p >> (SUFFIX_GROUP_BIT - 1)).  The induced value packs
   position, an L/S-type bit in the sign position, and a new-group bit at
   SUFFIX_GROUP_BIT - 1.  Returns the updated d. */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k];
sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 3 * prefetch_distance]);
/* Conditional prefetches: NULL pointer when the entry will not induce. */
sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais16_prefetchw(&induction_bucket[Ts2]); libsais16_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); }
sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais16_prefetchw(&induction_bucket[Ts3]); libsais16_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); }
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX;
if (p0 > 0)
{
SA[i + 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]);
SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d;
}
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX;
if (p1 > 0)
{
SA[i + 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]);
SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d;
}
}
for (j += 2 * prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX;
if (p > 0)
{
SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]);
SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d;
}
}
return d;
}
/* Left-to-right induction scan, 32-bit alphabet, minimal (1k) bucket
   layout: no distinct-name tracking, only per-symbol induction buckets.
   Positive entries p induce (p - 1) with an L/S-type bit in the sign
   position; each processed slot is cleared after use. */
static void libsais16_partial_sorting_scan_left_to_right_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 3 * prefetch_distance]);
/* Conditional prefetches: NULL when the entry will not induce. */
sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais16_prefetchw(&induction_bucket[T[s2 - 1]]); libsais16_prefetch(&T[s2] - 2); }
sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais16_prefetchw(&induction_bucket[T[s3 - 1]]); libsais16_prefetch(&T[s3] - 2); }
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { SA[i + 0] = 0; SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { SA[i + 1] = 0; SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); }
}
for (j += 2 * prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { SA[i] = 0; SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); }
}
}
#if defined(_OPENMP)
/* Parallel gather phase (6k layout): copies each SA entry and its induced
   bucket symbol into the shared cache without touching the buckets.  A
   symbol of 0 is stored when the masked entry is 0 (nothing to induce).
   The bucket updates happen later in the single-threaded block_sort. */
static void libsais16_partial_sorting_scan_left_to_right_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetch(&SA[i + 2 * prefetch_distance]);
libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2);
libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2);
libsais16_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); } cache[i + 0].symbol = symbol0;
sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); } cache[i + 1].symbol = symbol1;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]); } cache[i].symbol = symbol;
}
}
/* Parallel gather phase (4k layout).  For positive entries: cache the raw
   value and the BUCKETS_INDEX2 symbol, and clear the SA slot (p forced to
   0 before the masked store).  Non-inducing entries get symbol SAINT_MIN
   (negative, so block_sort skips them) and only have their sign bit
   stripped in place. */
static void libsais16_partial_sorting_scan_left_to_right_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
libsais16_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]); p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]); p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]); p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX;
}
}
/* Parallel gather phase (1k layout).  For positive entries: cache the
   already-composed induced value ((p - 1) with the L/S-type bit in the
   sign position) and the plain symbol T[p - 1], then clear the SA slot.
   Non-inducing entries get symbol SAINT_MIN and keep their masked value. */
static void libsais16_partial_sorting_scan_left_to_right_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
libsais16_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX;
}
}
/* Single-threaded sort phase (6k layout) over the gathered cache.  For
   each entry: advance the bucket pointer, store the destination slot back
   into cache[i].symbol and the composed induced value into cache[i].index.
   If the destination falls INSIDE this same block (< omp_block_end), the
   induced entry is chained directly into the cache so it is processed
   within this pass instead of waiting for the SA write-back.  Returns the
   updated distinct-name counter d. */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size;
for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&cache[i + 2 * prefetch_distance]);
libsais16_prefetchw(&buckets[cache[i + prefetch_distance + 0].symbol]);
libsais16_prefetchw(&buckets[cache[i + prefetch_distance + 1].symbol]);
sa_sint_t v0 = cache[i + 0].symbol, p0 = cache[i + 0].index; d += (p0 < 0); cache[i + 0].symbol = buckets[v0]++; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d;
if (cache[i + 0].symbol < omp_block_end) { sa_sint_t s = cache[i + 0].symbol, q = (cache[s].index = cache[i + 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); }
sa_sint_t v1 = cache[i + 1].symbol, p1 = cache[i + 1].index; d += (p1 < 0); cache[i + 1].symbol = buckets[v1]++; cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] = d;
if (cache[i + 1].symbol < omp_block_end) { sa_sint_t s = cache[i + 1].symbol, q = (cache[s].index = cache[i + 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); }
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = buckets[v]++; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
if (cache[i].symbol < omp_block_end) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); }
}
return d;
}
/* Single-threaded sort phase (4k layout) over the gathered cache.  Cached
   symbols are negative (SAINT_MIN) for non-inducing entries and are
   skipped.  For inducing entries the symbol encodes both the induction
   bucket (v >> 1) and the L/S-type bit (v & 1, shifted into the sign
   position of the composed value).  Like the 6k variant, destinations
   inside the current block are chained back into the cache for same-pass
   processing.  Returns the updated distinct-name counter d. */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k];
sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];
fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size;
for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&cache[i + 2 * prefetch_distance]);
sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais16_prefetchw(s0 >= 0 ? Ds0 : NULL);
sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais16_prefetchw(s1 >= 0 ? Ds1 : NULL);
sa_sint_t v0 = cache[i + 0].symbol;
if (v0 >= 0)
{
sa_sint_t p0 = cache[i + 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 0].symbol = induction_bucket[v0 >> 1]++; cache[i + 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d;
if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 0].index = np & SAINT_MAX; }
}
sa_sint_t v1 = cache[i + 1].symbol;
if (v1 >= 0)
{
sa_sint_t p1 = cache[i + 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 1].symbol = induction_bucket[v1 >> 1]++; cache[i + 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d;
if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 1].index = np & SAINT_MAX; }
}
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = induction_bucket[v >> 1]++; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d;
if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i].index = np & SAINT_MAX; }
}
}
return d;
}
/* Single-threaded sort phase (1k layout) over the gathered cache.  Only
   non-negative cached symbols are processed (SAINT_MIN marks skips).  The
   cached index already holds the composed induced value; this pass only
   assigns the destination slot from the induction bucket and, when the
   destination lies inside the current block, chains the induced entry
   straight back into the cache for same-pass processing. */
static void libsais16_partial_sorting_scan_left_to_right_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size;
for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&cache[i + 2 * prefetch_distance]);
sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL);
sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL);
sa_sint_t v0 = cache[i + 0].symbol;
if (v0 >= 0)
{
cache[i + 0].symbol = induction_bucket[v0]++;
if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 0].index = np & SAINT_MAX; }
}
sa_sint_t v1 = cache[i + 1].symbol;
if (v1 >= 0)
{
cache[i + 1].symbol = induction_bucket[v1]++;
if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 1].index = np & SAINT_MAX; }
}
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
cache[i].symbol = induction_bucket[v]++;
if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i].index = np & SAINT_MAX; }
}
}
}
/*
 * Parallel driver for one cache-sized block of the 6K-bucket left-to-right
 * scan.  Splits the block among threads in 16-aligned strides; with a single
 * thread it falls back to the direct serial scan.  Multi-threaded path:
 * all threads gather -> barrier -> master sorts the whole block serially
 * (bucket updates are not thread-safe) -> barrier -> all threads place the
 * cached suffixes back into SA.  Returns the updated distinct-name counter d.
 * Note: cache is biased by block_start so it can be indexed by absolute SA
 * position throughout.
 */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Per-thread sub-block: stride rounded down to a multiple of 16; the last
   thread absorbs the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais16_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais16_partial_sorting_scan_left_to_right_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
/* Serial sort of the entire block: bucket counters are shared state. */
d = libsais16_partial_sorting_scan_left_to_right_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais16_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
return d;
}
/*
 * Parallel driver for one cache-sized block of the 4K-bucket left-to-right
 * scan.  Same gather / master-sort / place choreography as the 6K variant,
 * but the final phase compacts the cache before placing (gathered entries may
 * carry sentinel symbols that must be dropped).  Returns the updated
 * distinct-name counter d.
 */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* 16-aligned stride per thread; last thread takes the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais16_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais16_partial_sorting_scan_left_to_right_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
d = libsais16_partial_sorting_scan_left_to_right_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
return d;
}
/*
 * Parallel driver for one cache-sized block of the 1K-bucket left-to-right
 * scan.  Mirrors the 4K/6K drivers (gather -> barrier -> master sort ->
 * barrier -> compact-and-place) but carries no distinct-name counter, so it
 * returns void.
 */
static void libsais16_partial_sorting_scan_left_to_right_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* 16-aligned stride per thread; last thread takes the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais16_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais16_partial_sorting_scan_left_to_right_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
libsais16_partial_sorting_scan_left_to_right_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
/*
 * Top-level left-to-right induction scan, 6K-bucket variant.  Seeds the scan
 * by placing suffix n-1 (tagged with SAINT_MIN in the sign bit) into its
 * bucket and recording a fresh distinct name in the bucket's third slot,
 * then processes left_suffixes_count entries either serially (single thread
 * or small input) or in thread-cache-sized blocks via the _block_omp driver.
 * Returns the updated distinct-name counter d.
 */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
SA[buckets[BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN;
buckets[2 + BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])] = ++d;
if (threads == 1 || left_suffixes_count < 65536)
{
d = libsais16_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, 0, left_suffixes_count);
}
#if defined(_OPENMP)
else
{
/* Process the range in blocks sized to fit the per-thread caches. */
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < left_suffixes_count; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > left_suffixes_count) { block_end = left_suffixes_count; }
d = libsais16_partial_sorting_scan_left_to_right_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
return d;
}
/*
 * Top-level left-to-right induction scan, 4K-bucket variant.  The induction
 * counters live at buckets[2*k] and the distinct-name table at buckets[0].
 * Seeds the scan with suffix n-1 (sign bit packs T[n-2] < T[n-1]; the entry
 * also carries SUFFIX_GROUP_MARKER), then scans all n entries serially or in
 * cache-sized parallel blocks.  Returns the updated distinct-name counter d.
 */
static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k];
sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];
SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)) | SUFFIX_GROUP_MARKER;
distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] < T[n - 1])] = ++d;
if (threads == 1 || n < 65536)
{
d = libsais16_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, 0, n);
}
#if defined(_OPENMP)
else
{
/* Process the range in blocks sized to fit the per-thread caches. */
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < n; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; }
d = libsais16_partial_sorting_scan_left_to_right_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
return d;
}
/*
 * Top-level left-to-right induction scan, 1K-bucket variant.  No distinct
 * names are tracked.  Seeds the scan with suffix n-1 (sign bit packs
 * T[n-2] < T[n-1]), then scans all n entries serially or in cache-sized
 * parallel blocks.
 */
static void libsais16_partial_sorting_scan_left_to_right_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
SA[buckets[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1));
if (threads == 1 || n < 65536)
{
libsais16_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, 0, n);
}
#if defined(_OPENMP)
else
{
/* Process the range in blocks sized to fit the per-thread caches. */
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < n; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; }
libsais16_partial_sorting_scan_left_to_right_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_start, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
/*
 * Shifts the sign-bit markers within each bucket of SA, scanning each bucket
 * from its top (temp_bucket[c] - 1) down to its base.  The XOR carry trick:
 * q extracts the difference between this entry's marker bit and the carried
 * marker s; XOR-ing q into both swaps them, so each entry ends up with the
 * marker of the entry processed just before it (i.e. markers move down by
 * one position).  Buckets are independent, so they are distributed across
 * threads round-robin; iteration order within a bucket is strictly
 * top-to-bottom and must not be reordered.
 */
static void libsais16_partial_sorting_shift_markers_16u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, const sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
const fast_sint_t prefetch_distance = 32;
const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE];
fast_sint_t c;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536)
#else
UNUSED(threads); UNUSED(n);
#endif
for (c = BUCKETS_INDEX2(ALPHABET_SIZE - 1, 0); c >= BUCKETS_INDEX2(1, 0); c -= BUCKETS_INDEX2(1, 0))
{
fast_sint_t i, j; sa_sint_t s = SAINT_MIN;
/* 4x-unrolled marker-carry loop over one bucket, top to bottom. */
for (i = (fast_sint_t)temp_bucket[c] - 1, j = (fast_sint_t)buckets[c - BUCKETS_INDEX2(1, 0)] + 3; i >= j; i -= 4)
{
libsais16_prefetchw(&SA[i - prefetch_distance]);
sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0;
sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1;
sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2;
sa_sint_t p3 = SA[i - 3], q3 = (p3 & SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3;
}
for (j -= 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q;
}
}
}
/*
 * 32-bit/6K-bucket counterpart of libsais16_partial_sorting_shift_markers_16u_omp:
 * shifts sign-bit markers down by one within each bucket using the same XOR
 * carry trick.  Bucket bounds come from buckets[BUCKETS_INDEX4(c, 0)] (top)
 * and temp_bucket[BUCKETS_INDEX2(c - 1, 0)] (base).  Buckets are independent
 * and parallelized round-robin; within a bucket the top-to-bottom order is
 * load-bearing.
 */
static void libsais16_partial_sorting_shift_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, const sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
const fast_sint_t prefetch_distance = 32;
const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k];
fast_sint_t c;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && k >= 65536)
#else
UNUSED(threads);
#endif
for (c = (fast_sint_t)k - 1; c >= 1; c -= 1)
{
fast_sint_t i, j; sa_sint_t s = SAINT_MIN;
/* 4x-unrolled marker-carry loop over one bucket, top to bottom. */
for (i = (fast_sint_t)buckets[BUCKETS_INDEX4(c, 0)] - 1, j = (fast_sint_t)temp_bucket[BUCKETS_INDEX2(c - 1, 0)] + 3; i >= j; i -= 4)
{
libsais16_prefetchw(&SA[i - prefetch_distance]);
sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0;
sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1;
sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2;
sa_sint_t p3 = SA[i - 3], q3 = (p3 & SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3;
}
for (j -= 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q;
}
}
}
/*
 * 4K-bucket marker shift: walks all of SA from the top down, carrying the
 * SUFFIX_GROUP_MARKER bit with the same XOR trick as the other shift_markers
 * routines.  The extra mask ((p > 0) << (SUFFIX_GROUP_BIT - 1)) makes the
 * swap a no-op for non-positive entries, so only positive entries exchange
 * their marker with the carry.  Serial; one pass over n entries, 4x unrolled.
 */
static void libsais16_partial_sorting_shift_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i; sa_sint_t s = SUFFIX_GROUP_MARKER;
for (i = (fast_sint_t)n - 1; i >= 3; i -= 4)
{
libsais16_prefetchw(&SA[i - prefetch_distance]);
sa_sint_t p0 = SA[i - 0], q0 = ((p0 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p0 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q0; SA[i - 0] = p0 ^ q0;
sa_sint_t p1 = SA[i - 1], q1 = ((p1 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p1 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q1; SA[i - 1] = p1 ^ q1;
sa_sint_t p2 = SA[i - 2], q2 = ((p2 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p2 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q2; SA[i - 2] = p2 ^ q2;
sa_sint_t p3 = SA[i - 3], q3 = ((p3 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p3 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q3; SA[i - 3] = p3 ^ q3;
}
for (; i >= 0; i -= 1)
{
sa_sint_t p = SA[i], q = ((p & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q; SA[i] = p ^ q;
}
}
/*
 * Copies the temporary two-entries-per-symbol bucket table (stored starting
 * at buckets[4 * k]) back into the four-entries-per-symbol layout at the
 * front of buckets.  Write range (below 4 * k) and read range (at or above
 * 4 * k) do not overlap, so the in-place copy is safe.
 */
static void libsais16_partial_sorting_shift_buckets_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k];
    const fast_sint_t last = BUCKETS_INDEX2((fast_sint_t)k - 1, 0);
    fast_sint_t c;
    for (c = BUCKETS_INDEX2(0, 0); c <= last; c += BUCKETS_INDEX2(1, 0))
    {
        sa_sint_t entry0 = temp_bucket[c + BUCKETS_INDEX2(0, 0)];
        sa_sint_t entry1 = temp_bucket[c + BUCKETS_INDEX2(0, 1)];
        buckets[2 * c + BUCKETS_INDEX4(0, 0)] = entry0;
        buckets[2 * c + BUCKETS_INDEX4(0, 1)] = entry1;
    }
}
/*
 * Serial right-to-left induction scan, 16-bit alphabet.  For each SA entry p
 * (a negative sign bit bumps the distinct-name counter d), the preceding
 * suffix p-1 is placed at the bottom of its 2-entry bucket; its sign bit is
 * set when this bucket last saw a different name (distinct_names[v] != d),
 * and distinct_names[v] is updated to d.  Main loop is 2x unrolled with
 * prefetching of both SA and the text positions about to be read.
 */
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetch(&SA[i - 2 * prefetch_distance]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2);
libsais16_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2);
sa_sint_t p0 = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]);
SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d;
sa_sint_t p1 = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]);
SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d;
}
/* Remainder loop. */
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]);
SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
}
return d;
}
#if defined(_OPENMP)
/*
 * Per-thread prepare phase for the parallel right-to-left 16u scan.  Records
 * each SA entry and its destination bucket index into the thread-local cache
 * while counting, in freshly zeroed thread-local buckets, how many entries
 * target each bucket (induction_bucket) and the last local distinct name seen
 * per bucket (distinct_names).  Saves the local name delta (d - 1) and the
 * number of cached entries into this thread's state for the master merge.
 */
static void libsais16_partial_sorting_scan_right_to_left_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
/* Thread-local buckets start from zero; counts are merged later. */
memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0; sa_sint_t d = 1;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetch(&SA[i - 2 * prefetch_distance]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2);
libsais16_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2);
sa_sint_t p0 = cache[count].index = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d;
sa_sint_t p1 = cache[count].index = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d;
}
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); induction_bucket[v]++; distinct_names[v] = d;
}
/* Export local results: name delta (d started at 1) and cached entry count. */
state[0].state.position = (fast_sint_t)d - 1;
state[0].state.count = count;
}
/*
 * Per-thread place phase for the parallel right-to-left 16u scan.  Replays
 * the thread's cached (index, symbol) entries — in the same order the prepare
 * phase recorded them — writing each induced suffix into SA at the slot
 * reserved in this thread's adjusted buckets, with the sign bit set when the
 * bucket's distinct name changed.  2x unrolled with cache prefetching.
 */
static void libsais16_partial_sorting_scan_right_to_left_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t i, j;
for (i = 0, j = count - 1; i < j; i += 2)
{
libsais16_prefetch(&cache[i + prefetch_distance]);
sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol;
SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d;
sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol;
SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d;
}
for (j += 1; i < j; i += 1)
{
sa_sint_t p = cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol;
SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
}
}
/*
 * Parallel driver for one block of the right-to-left 16u scan.  Each thread
 * prepares (counts + caches) its sub-block; the master then walks threads
 * from LAST to FIRST (matching right-to-left scan order), converting each
 * thread's local bucket counts into absolute write positions and rebasing its
 * local distinct names onto the global counter d; finally each thread places
 * its cached entries independently.  Returns the merged d.
 * NOTE(review): the A/B/D exchange in the master section simultaneously
 * stores the pre-merge global values back into the thread-local tables for
 * use by the place phase — order of the two loops and the d updates is
 * load-bearing; do not reorder.
 */
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais16_partial_sorting_scan_right_to_left_16u(T, SA, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais16_partial_sorting_scan_right_to_left_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]);
}
#pragma omp barrier
#pragma omp master
{
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t t;
/* Threads are merged in reverse: thread t's sub-block lies to the right
   of thread t-1's, and a right-to-left scan consumes rightmost first. */
for (t = omp_num_threads - 1; t >= 0; --t)
{
sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE];
fast_sint_t c;
for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A - B; temp_induction_bucket[c] = A; }
for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ? D : A; temp_distinct_names[c] = A; }
d += 1 + (sa_sint_t)thread_state[t].state.position; thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position;
}
}
#pragma omp barrier
{
libsais16_partial_sorting_scan_right_to_left_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position);
}
}
#endif
}
return d;
}
#endif
/*
 * Top-level right-to-left induction scan, 16-bit alphabet.  Scans SA over
 * [left_suffixes_count + 1, n - first_lms_suffix), serially for small inputs
 * or a single thread.  The parallel path cannot use fixed-size blocks because
 * induced entries are written into positions the same scan will read: blocks
 * must end at an SA slot that is still 0 (not yet induced).  Runs of fewer
 * than 32 entries are handled inline to avoid parallel overhead.
 */
static void libsais16_partial_sorting_scan_right_to_left_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1;
fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix;
if (threads == 1 || (scan_end - scan_start) < 65536)
{
libsais16_partial_sorting_scan_right_to_left_16u(T, SA, buckets, d, scan_start, scan_end - scan_start);
}
#if defined(_OPENMP)
else
{
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t block_start;
for (block_start = scan_end - 1; block_start >= scan_start; )
{
if (SA[block_start] == 0)
{
block_start--;
}
else
{
/* Grow the block leftward until a not-yet-induced (zero) slot or the
   cache-capacity limit is reached. */
fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < scan_start) { block_max_end = scan_start - 1; }
fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; }
fast_sint_t block_size = block_start - block_end;
if (block_size < 32)
{
/* Tiny block: induce inline, same logic as the serial scan body. */
for (; block_start > block_end; block_start -= 1)
{
sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]);
SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
}
}
else
{
d = libsais16_partial_sorting_scan_right_to_left_16u_block_omp(T, SA, buckets, d, block_end + 1, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
/*
 * Serial right-to-left induction scan, 32-bit input, 6K-bucket variant.
 * For each SA entry p (negative sign bit bumps the distinct-name counter d),
 * places suffix p-1 at the bottom of its 4-entry bucket; the sign bit of the
 * stored entry is set when this bucket's last recorded name (slot +2) differs
 * from d.  Uses a two-level prefetch pipeline: text positions at distance
 * 2*prefetch_distance, bucket slots at distance prefetch_distance.
 */
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetch(&SA[i - 3 * prefetch_distance]);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 2);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 2);
sa_sint_t p0 = SA[i - prefetch_distance - 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais16_prefetchw(&buckets[v0]);
sa_sint_t p1 = SA[i - prefetch_distance - 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais16_prefetchw(&buckets[v1]);
sa_sint_t p2 = SA[i - 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] > T[p2 - 1]);
SA[--buckets[v2]] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d;
sa_sint_t p3 = SA[i - 1]; d += (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] > T[p3 - 1]);
SA[--buckets[v3]] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d;
}
/* Remainder loop. */
for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]);
SA[--buckets[v]] = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
}
return d;
}
/*
 * Serial right-to-left induction scan, 32-bit input, 4K-bucket variant.
 * Only positive SA entries are induced; each processed slot is cleared to 0.
 * The SUFFIX_GROUP_MARKER bit in p bumps the distinct-name counter d
 * (extracted via the >> (SUFFIX_GROUP_BIT - 1) shift) and is masked off
 * before use.  The stored entry packs: (p - 1), a type flag in the sign bit
 * (T[p-2] > T[p-1]), and a new-name flag in the suffix-group bit.
 */
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k];
sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&SA[i - 3 * prefetch_distance]);
sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais16_prefetchw(&induction_bucket[Ts2]); libsais16_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); }
sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais16_prefetchw(&induction_bucket[Ts3]); libsais16_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); }
sa_sint_t p0 = SA[i - 0];
if (p0 > 0)
{
SA[i - 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]);
SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d;
}
sa_sint_t p1 = SA[i - 1];
if (p1 > 0)
{
SA[i - 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]);
SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d;
}
}
/* Remainder loop. */
for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i];
if (p > 0)
{
SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]);
SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d;
}
}
return d;
}
/*
 * Serial right-to-left induction scan, 32-bit input, 1K-bucket variant.
 * Only positive SA entries are induced; each processed slot is cleared to 0.
 * The induced entry for suffix p-1 goes to the bottom of bucket T[p-1] with
 * a flag in the sign bit when T[p-2] > T[p-1].  No distinct-name tracking.
 * 2x unrolled with a staged prefetch pipeline over SA, T, and the buckets.
 */
static void libsais16_partial_sorting_scan_right_to_left_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&SA[i - 3 * prefetch_distance]);
sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais16_prefetchw(&induction_bucket[T[s2 - 1]]); libsais16_prefetch(&T[s2] - 2); }
sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais16_prefetchw(&induction_bucket[T[s3 - 1]]); libsais16_prefetch(&T[s3] - 2); }
sa_sint_t p0 = SA[i - 0]; if (p0 > 0) { SA[i - 0] = 0; SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i - 1]; if (p1 > 0) { SA[i - 1] = 0; SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); }
}
/* Remainder loop. */
for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; if (p > 0) { SA[i] = 0; SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); }
}
}
#if defined(_OPENMP)
/*
 * Gather phase for the parallel right-to-left 6K scan: copies each SA entry
 * of this thread's sub-block into the cache along with its precomputed
 * 4-entry bucket index (symbol 0 for empty slots, i.e. p == 0 after masking
 * the sign bit).  Read-only with respect to SA; safe to run concurrently.
 */
static void libsais16_partial_sorting_scan_right_to_left_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetch(&SA[i + 2 * prefetch_distance]);
libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2);
libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1);
libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2);
libsais16_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0;
sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i + 1].symbol = symbol1;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol;
}
}
/*
 * Gather phase for the parallel right-to-left 4K scan: for each positive SA
 * entry, clears the slot, caches the raw entry (marker bit intact) and its
 * 2-entry bucket index computed from the marker-stripped position.  Entries
 * that are not positive get symbol SAINT_MIN so the sort phase skips them.
 */
static void libsais16_partial_sorting_scan_right_to_left_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
libsais16_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i + 1].symbol = symbol1;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol;
}
}
/*
 * Gather phase for the parallel right-to-left 1K scan: for each positive SA
 * entry p, clears the slot and caches the already-induced value for suffix
 * p-1 — index packs (p - 1) with a flag in the sign bit (T[p-2] > T[p-1]),
 * symbol is the bucket character T[p-1].  Entries that are not positive get
 * symbol SAINT_MIN so the sort phase skips them.
 */
static void libsais16_partial_sorting_scan_right_to_left_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
libsais16_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; } cache[i + 0].symbol = symbol0;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; } cache[i + 1].symbol = symbol1;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; } cache[i].symbol = symbol;
}
}
/*
 * Sort phase for a gathered block in the 6K right-to-left scan, run serially
 * by the master thread over the whole block from its top downward.  For each
 * cache entry: bumps d on a negative index, claims the next slot downward in
 * buckets[symbol], packs the induced value (p - 1 with a new-name flag in the
 * sign bit), and records d in the bucket's name slot (+2).  When the claimed
 * slot falls inside this block (>= omp_block_start), the induced value is
 * immediately written back into the cache at that slot with a recomputed
 * symbol so it is induced during this same pass.  Returns the updated d.
 */
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&cache[i - 2 * prefetch_distance]);
libsais16_prefetchw(&buckets[cache[i - prefetch_distance - 0].symbol]);
libsais16_prefetchw(&buckets[cache[i - prefetch_distance - 1].symbol]);
sa_sint_t v0 = cache[i - 0].symbol, p0 = cache[i - 0].index; d += (p0 < 0); cache[i - 0].symbol = --buckets[v0]; cache[i - 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d;
if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t s = cache[i - 0].symbol, q = (cache[s].index = cache[i - 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); }
sa_sint_t v1 = cache[i - 1].symbol, p1 = cache[i - 1].index; d += (p1 < 0); cache[i - 1].symbol = --buckets[v1]; cache[i - 1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] = d;
if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t s = cache[i - 1].symbol, q = (cache[s].index = cache[i - 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); }
}
/* Remainder loop. */
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = --buckets[v]; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
if (cache[i].symbol >= omp_block_start) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); }
}
return d;
}
// Sort phase of the blocked right-to-left 4k scan. The buckets array is split
// into induction counters (&buckets[3 * k], indexed by v >> 1) and distinct
// name counters (&buckets[0 * k], indexed by v). Cache entries with a negative
// symbol were skipped by the gather phase and are ignored here (v >= 0 test).
// d advances when the cached index carries the SUFFIX_GROUP marker bit, and
// each placed entry encodes (p - 1), the low bit of v, and a fresh group flag.
// Destinations landing inside this block are re-gathered so induction
// continues locally. Returns the updated d.
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k];
sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];
fast_sint_t i, j;
// Main loop, unrolled x2, right to left over the cached block.
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&cache[i - 2 * prefetch_distance]);
sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais16_prefetchw(s0 >= 0 ? Ds0 : NULL);
sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais16_prefetchw(s1 >= 0 ? Ds1 : NULL);
sa_sint_t v0 = cache[i - 0].symbol;
if (v0 >= 0)
{
sa_sint_t p0 = cache[i - 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 0].symbol = --induction_bucket[v0 >> 1]; cache[i - 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d;
if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } }
}
sa_sint_t v1 = cache[i - 1].symbol;
if (v1 >= 0)
{
sa_sint_t p1 = cache[i - 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 1].symbol = --induction_bucket[v1 >> 1]; cache[i - 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d;
if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } }
}
}
// Scalar tail for the remaining prefetch_distance + 1 elements.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = --induction_bucket[v >> 1]; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d;
if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } }
}
}
return d;
}
// Sort phase of the blocked right-to-left 1k scan. No distinct-name tracking
// in this variant: each valid cache entry (symbol >= 0) is simply placed at
// --induction_bucket[v]; if the destination lands inside this block, the
// destination's suffix is re-gathered (index = (np - 1) with the sign bit set
// when T[np - 2] > T[np - 1]; symbol = T[np - 1]) so induction continues.
static void libsais16_partial_sorting_scan_right_to_left_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
// Main loop, unrolled x2, right to left over the cached block.
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&cache[i - 2 * prefetch_distance]);
sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL);
sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL);
sa_sint_t v0 = cache[i - 0].symbol;
if (v0 >= 0)
{
cache[i - 0].symbol = --induction_bucket[v0];
if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } }
}
sa_sint_t v1 = cache[i - 1].symbol;
if (v1 >= 0)
{
cache[i - 1].symbol = --induction_bucket[v1];
if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; }}
}
}
// Scalar tail for the remaining prefetch_distance + 1 elements.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
cache[i].symbol = --induction_bucket[v];
if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } }
}
}
}
// Parallel driver for one block of the right-to-left 6k scan. Each thread
// gathers its sub-block into the shared cache, the master thread alone runs
// the order-dependent sort over the whole block, then every thread places its
// cached results back into SA. `cache - block_start` lets workers index the
// cache directly by SA position. Falls back to the sequential scan when only
// one thread runs. Returns the updated distinct-name counter d.
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Split the block into 16-aligned strides; the last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais16_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais16_partial_sorting_scan_right_to_left_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
d = libsais16_partial_sorting_scan_right_to_left_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais16_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
return d;
}
// Parallel driver for one block of the right-to-left 4k scan; same
// gather / master-sort / place structure as the 6k variant, but the place
// step compacts the cache (skipping invalid entries) before writing back.
// Returns the updated distinct-name counter d.
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Split the block into 16-aligned strides; the last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais16_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais16_partial_sorting_scan_right_to_left_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
d = libsais16_partial_sorting_scan_right_to_left_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
return d;
}
// Parallel driver for one block of the right-to-left 1k scan; same
// gather / master-sort / compact-and-place structure as the 4k variant,
// but with no distinct-name counter to thread through (returns void).
static void libsais16_partial_sorting_scan_right_to_left_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Split the block into 16-aligned strides; the last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais16_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais16_partial_sorting_scan_right_to_left_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
libsais16_partial_sorting_scan_right_to_left_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
// Top-level right-to-left 6k scan over SA[left_suffixes_count + 1 .. n - first_lms_suffix).
// Small inputs (or threads == 1) run the sequential scan directly; otherwise the
// range is processed right to left in blocks of threads * LIBSAIS_PER_THREAD_CACHE_SIZE,
// threading the distinct-name counter d through the block calls. Returns the final d.
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1;
fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix;
if (threads == 1 || (scan_end - scan_start) < 65536)
{
d = libsais16_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, scan_start, scan_end - scan_start);
}
#if defined(_OPENMP)
else
{
// Blocks must advance right to left to match the induction order.
fast_sint_t block_start, block_end;
for (block_start = scan_end - 1; block_start >= scan_start; block_start = block_end)
{
block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < scan_start) { block_end = scan_start - 1; }
d = libsais16_partial_sorting_scan_right_to_left_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
}
}
#else
UNUSED(thread_state);
#endif
return d;
}
// Top-level right-to-left 4k scan over SA[0 .. n). Sequential for small inputs
// or a single thread; otherwise processes right to left in blocks of
// threads * LIBSAIS_PER_THREAD_CACHE_SIZE, threading d through. Returns the final d.
static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || n < 65536)
{
d = libsais16_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, 0, n);
}
#if defined(_OPENMP)
else
{
// Blocks must advance right to left to match the induction order.
fast_sint_t block_start, block_end;
for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end)
{
block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; }
d = libsais16_partial_sorting_scan_right_to_left_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
}
}
#else
UNUSED(thread_state);
#endif
return d;
}
// Top-level right-to-left 1k scan over SA[0 .. n); same blocking scheme as the
// 4k variant, with no distinct-name counter.
static void libsais16_partial_sorting_scan_right_to_left_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || n < 65536)
{
libsais16_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, 0, n);
}
#if defined(_OPENMP)
else
{
// Blocks must advance right to left to match the induction order.
fast_sint_t block_start, block_end;
for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end)
{
block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; }
libsais16_partial_sorting_scan_right_to_left_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
/*
 * In-place, stable compaction of the marked (negative) entries of
 * SA[omp_block_start .. omp_block_start + omp_block_size), 4k variant.
 * Each kept value has SUFFIX_GROUP_MARKER subtracted and the marker bit
 * cleared. Writes are branchless: every slot is written at SA[dst], but
 * dst only advances when the source entry was negative, so unmarked
 * entries are overwritten by the next kept one. Returns one past the
 * last kept slot.
 */
static fast_sint_t libsais16_partial_sorting_gather_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t lookahead = 32;

    fast_sint_t src = omp_block_start;
    fast_sint_t dst = omp_block_start;
    fast_sint_t unrolled_end = omp_block_start + omp_block_size - 3;

    /* Unrolled x4 main loop with software prefetch. */
    while (src < unrolled_end)
    {
        libsais16_prefetch(&SA[src + lookahead]);

        sa_sint_t v0 = SA[src + 0]; SA[dst] = (v0 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v0 < 0);
        sa_sint_t v1 = SA[src + 1]; SA[dst] = (v1 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v1 < 0);
        sa_sint_t v2 = SA[src + 2]; SA[dst] = (v2 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v2 < 0);
        sa_sint_t v3 = SA[src + 3]; SA[dst] = (v3 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v3 < 0);

        src += 4;
    }

    /* Scalar tail (up to 3 remaining elements). */
    fast_sint_t scan_end = omp_block_start + omp_block_size;
    while (src < scan_end)
    {
        sa_sint_t v = SA[src]; SA[dst] = (v - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v < 0);
        src += 1;
    }

    return dst;
}
/*
 * In-place, stable compaction of the marked (negative) entries of
 * SA[omp_block_start .. omp_block_start + omp_block_size), 1k variant.
 * Kept values have the sign-bit marker stripped (& SAINT_MAX). Writes are
 * branchless: every slot is written at SA[dst], but dst only advances when
 * the source entry was negative. Returns one past the last kept slot.
 */
static fast_sint_t libsais16_partial_sorting_gather_lms_suffixes_32s_1k(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t lookahead = 32;

    fast_sint_t src = omp_block_start;
    fast_sint_t dst = omp_block_start;
    fast_sint_t unrolled_end = omp_block_start + omp_block_size - 3;

    /* Unrolled x4 main loop with software prefetch. */
    while (src < unrolled_end)
    {
        libsais16_prefetch(&SA[src + lookahead]);

        sa_sint_t v0 = SA[src + 0]; SA[dst] = v0 & SAINT_MAX; dst += (v0 < 0);
        sa_sint_t v1 = SA[src + 1]; SA[dst] = v1 & SAINT_MAX; dst += (v1 < 0);
        sa_sint_t v2 = SA[src + 2]; SA[dst] = v2 & SAINT_MAX; dst += (v2 < 0);
        sa_sint_t v3 = SA[src + 3]; SA[dst] = v3 & SAINT_MAX; dst += (v3 < 0);

        src += 4;
    }

    /* Scalar tail (up to 3 remaining elements). */
    fast_sint_t scan_end = omp_block_start + omp_block_size;
    while (src < scan_end)
    {
        sa_sint_t v = SA[src]; SA[dst] = v & SAINT_MAX; dst += (v < 0);
        src += 1;
    }

    return dst;
}
// Parallel wrapper for the 4k LMS-suffix gather: each thread compacts its own
// stride of SA, records (position, count) in its thread_state slot, then the
// master thread memmoves the per-thread runs together to form one contiguous
// prefix. Single-threaded execution skips the bookkeeping entirely.
static void libsais16_partial_sorting_gather_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned strides; the last thread takes the remainder.
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
libsais16_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
thread_state[omp_thread_num].state.position = omp_block_start;
thread_state[omp_thread_num].state.count = libsais16_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size) - omp_block_start;
}
#pragma omp barrier
#pragma omp master
{
// Stitch the per-thread compacted runs into one contiguous prefix.
// Thread 0's run is already in place, so only t > 0 needs a move.
fast_sint_t t, position = 0;
for (t = 0; t < omp_num_threads; ++t)
{
if (t > 0 && thread_state[t].state.count > 0)
{
memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
position += thread_state[t].state.count;
}
}
}
#endif
}
}
// Parallel wrapper for the 1k LMS-suffix gather; identical orchestration to
// the 4k variant (per-thread compaction, then master-thread stitching).
static void libsais16_partial_sorting_gather_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned strides; the last thread takes the remainder.
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
libsais16_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
thread_state[omp_thread_num].state.position = omp_block_start;
thread_state[omp_thread_num].state.count = libsais16_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size) - omp_block_start;
}
#pragma omp barrier
#pragma omp master
{
// Stitch the per-thread compacted runs into one contiguous prefix.
fast_sint_t t, position = 0;
for (t = 0; t < omp_num_threads; ++t)
{
if (t > 0 && thread_state[t].state.count > 0)
{
memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
position += thread_state[t].state.count;
}
}
}
#endif
}
}
// Induces the partial order of suffixes for the 16-bit alphabet case:
// clears 2*ALPHABET_SIZE marker counters in buckets, runs the left-to-right
// scan (which yields the running distinct-name counter d), shifts the
// markers, then runs the right-to-left scan. The phases must run in this
// exact order.
static void libsais16_induce_partial_order_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
memset(&buckets[2 * ALPHABET_SIZE], 0, 2 * ALPHABET_SIZE * sizeof(sa_sint_t));
sa_sint_t d = libsais16_partial_sorting_scan_left_to_right_16u_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state);
libsais16_partial_sorting_shift_markers_16u_omp(SA, n, buckets, threads);
libsais16_partial_sorting_scan_right_to_left_16u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, d, threads, thread_state);
}
// Induces the partial order for the 32-bit / 6k-buckets case:
// left-to-right scan, marker shift, bucket shift, then right-to-left scan,
// with the distinct-name counter d carried from the first scan to the last.
static void libsais16_induce_partial_order_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t d = libsais16_partial_sorting_scan_left_to_right_32s_6k_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state);
libsais16_partial_sorting_shift_markers_32s_6k_omp(SA, k, buckets, threads);
libsais16_partial_sorting_shift_buckets_32s_6k(k, buckets);
libsais16_partial_sorting_scan_right_to_left_32s_6k_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, d, threads, thread_state);
}
// Induces the partial order for the 32-bit / 4k-buckets case: zeroes the
// first 2*k bucket counters, runs both scans (d carried between them), then
// gathers the marked LMS suffixes into a contiguous prefix of SA.
static void libsais16_induce_partial_order_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));
sa_sint_t d = libsais16_partial_sorting_scan_left_to_right_32s_4k_omp(T, SA, n, k, buckets, 0, threads, thread_state);
libsais16_partial_sorting_shift_markers_32s_4k(SA, n);
libsais16_partial_sorting_scan_right_to_left_32s_4k_omp(T, SA, n, k, buckets, d, threads, thread_state);
libsais16_partial_sorting_gather_lms_suffixes_32s_4k_omp(SA, n, threads, thread_state);
}
// Induces the partial order for the 32-bit / 2k-buckets case: the two scan
// directions use the two k-sized halves of buckets (left-to-right uses
// &buckets[k], right-to-left uses &buckets[0]), then the marked LMS suffixes
// are gathered into a contiguous prefix of SA.
static void libsais16_induce_partial_order_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
libsais16_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, &buckets[1 * k], threads, thread_state);
libsais16_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, &buckets[0 * k], threads, thread_state);
libsais16_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n, threads, thread_state);
}
// Induces the partial order for the 32-bit / 1k-buckets (minimal memory)
// case: with only one k-sized bucket array, the symbol counts must be
// recounted and the bucket boundaries rebuilt (starts, then ends) before
// each scan direction. Ends with the LMS-suffix gather.
static void libsais16_induce_partial_order_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
libsais16_count_suffixes_32s(T, n, k, buckets);
libsais16_initialize_buckets_start_32s_1k(k, buckets);
libsais16_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, buckets, threads, thread_state);
libsais16_count_suffixes_32s(T, n, k, buckets);
libsais16_initialize_buckets_end_32s_1k(k, buckets);
libsais16_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, buckets, threads, thread_state);
libsais16_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n, threads, thread_state);
}
// Assigns names to the LMS suffixes stored in SA[omp_block_start ..
// omp_block_start + omp_block_size): for each entry p, writes the current
// name (with the sign bit set) into the upper half of SA at SAm[(p & SAINT_MAX) >> 1],
// and advances the name whenever p carries the sign-bit marker (p < 0) —
// presumably marking the start of a new group; confirm against the partial
// sorting scans that set the marker. Returns the name counter after the block.
static sa_sint_t libsais16_renumber_lms_suffixes_16u(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT SAm = &SA[m];
fast_sint_t i, j;
// Main loop, unrolled x4, with prefetch of both the source entries and the
// scattered destination slots in SAm.
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais16_prefetch(&SA[i + 2 * prefetch_distance]);
libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]);
libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]);
libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]);
libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]);
sa_sint_t p0 = SA[i + 0]; SAm[(p0 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p0 < 0;
sa_sint_t p1 = SA[i + 1]; SAm[(p1 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p1 < 0;
sa_sint_t p2 = SA[i + 2]; SAm[(p2 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p2 < 0;
sa_sint_t p3 = SA[i + 3]; SAm[(p3 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p3 < 0;
}
// Scalar tail for the final prefetch_distance + 3 elements.
for (j += prefetch_distance + 3; i < j; i += 1)
{
sa_sint_t p = SA[i]; SAm[(p & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p < 0;
}
return name;
}
// Scans SA[m + omp_block_start .. m + omp_block_start + omp_block_size) from
// right to left and compacts the marked (negative) entries — with the sign
// bit stripped — toward position l - 1, moving downward. Writes are
// branchless: every slot is written at SA[l], but l only moves when the
// source entry was negative. Returns the final left boundary of the gathered
// run (one below the last written slot, corrected by the l -= 1 / l += 1 pair).
static fast_sint_t libsais16_gather_marked_suffixes_16u(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
// Convert l from "one past the run" to the first write position.
l -= 1;
fast_sint_t i, j;
// Main loop, unrolled x4, right to left.
for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start + 3; i >= j; i -= 4)
{
libsais16_prefetch(&SA[i - prefetch_distance]);
sa_sint_t s0 = SA[i - 0]; SA[l] = s0 & SAINT_MAX; l -= s0 < 0;
sa_sint_t s1 = SA[i - 1]; SA[l] = s1 & SAINT_MAX; l -= s1 < 0;
sa_sint_t s2 = SA[i - 2]; SA[l] = s2 & SAINT_MAX; l -= s2 < 0;
sa_sint_t s3 = SA[i - 3]; SA[l] = s3 & SAINT_MAX; l -= s3 < 0;
}
// Scalar tail (up to 3 remaining elements).
for (j -= 3; i >= j; i -= 1)
{
sa_sint_t s = SA[i]; SA[l] = s & SAINT_MAX; l -= s < 0;
}
l += 1;
return l;
}
// Parallel wrapper for the 16u LMS renumbering. Each thread first counts the
// marked (negative) entries in its stride, then — after a barrier — computes
// its starting name as the prefix sum of the earlier threads' counts and
// renumbers its stride independently. The last thread reports the total name
// count. Returns the total number of names assigned.
static sa_sint_t libsais16_renumber_lms_suffixes_16u_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t name = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned strides; the last thread takes the remainder.
fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;
if (omp_num_threads == 1)
{
name = libsais16_renumber_lms_suffixes_16u(SA, m, 0, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
thread_state[omp_thread_num].state.count = libsais16_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size);
}
#pragma omp barrier
{
// Starting name for this stride = sum of earlier strides' marked counts.
fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
if (omp_thread_num == omp_num_threads - 1)
{
name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count);
}
libsais16_renumber_lms_suffixes_16u(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size);
}
}
#endif
}
return name;
}
// Parallel wrapper for gathering marked LMS suffixes out of the upper half of
// SA. Threads other than the last compact their stride in place (destination
// end = m + omp_block_start + omp_block_size); the last thread compacts into
// the very end of the working area (n + fs). After a barrier the master
// thread memmoves the per-thread runs so they form one contiguous run ending
// at n + fs.
static void libsais16_gather_marked_lms_suffixes_16u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Strides cover the upper half (n >> 1 entries), 16-aligned; last thread
// takes the remainder.
fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start;
if (omp_num_threads == 1)
{
libsais16_gather_marked_suffixes_16u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
if (omp_thread_num < omp_num_threads - 1)
{
thread_state[omp_thread_num].state.position = libsais16_gather_marked_suffixes_16u(SA, m, (fast_sint_t)m + omp_block_start + omp_block_size, omp_block_start, omp_block_size);
thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size - thread_state[omp_thread_num].state.position;
}
else
{
thread_state[omp_thread_num].state.position = libsais16_gather_marked_suffixes_16u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size);
thread_state[omp_thread_num].state.count = (fast_sint_t)n + (fast_sint_t)fs - thread_state[omp_thread_num].state.position;
}
}
#pragma omp barrier
#pragma omp master
{
// Stitch the runs right to left so they end flush at n + fs; the last
// thread's run is already in place.
fast_sint_t t, position = (fast_sint_t)n + (fast_sint_t)fs;
for (t = omp_num_threads - 1; t >= 0; --t)
{
position -= thread_state[t].state.count;
if (t != omp_num_threads - 1 && thread_state[t].state.count > 0)
{
memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
}
}
}
#endif
}
}
// Renumbers the m LMS suffixes and, when the names are not all distinct
// (name < m), gathers the marked suffixes for the recursive round; otherwise
// every name is unique and the markers are simply stripped in place.
// Clears the upper half of SA first, as the renumbering scatters into it.
// Returns the number of distinct names.
static sa_sint_t libsais16_renumber_and_gather_lms_suffixes_16u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t));
sa_sint_t name = libsais16_renumber_lms_suffixes_16u_omp(SA, m, threads, thread_state);
if (name < m)
{
libsais16_gather_marked_lms_suffixes_16u_omp(SA, n, m, fs, threads, thread_state);
}
else
{
// All names distinct: just strip the sign-bit markers.
fast_sint_t i; for (i = 0; i < m; i += 1) { SA[i] &= SAINT_MAX; }
}
return name;
}
// 4k variant of LMS renumbering: strips the marker from each SA entry in
// place and writes the current name into SAm[p >> 1]. The written name's
// sign bit is (p & p_prev & SAINT_MIN) — set only when both the current and
// the previous entry carried the marker; p3 carries the previous value across
// iterations (seeded with 0), which is why the statement order here is
// load-bearing. Returns the name counter after the block.
static sa_sint_t libsais16_renumber_distinct_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT SAm = &SA[m];
fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0;
// Main loop, unrolled x4; each lane's sign decision uses the previous lane's value.
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]);
libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]);
libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]);
libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]);
p0 = SA[i + 0]; SAm[(SA[i + 0] = p0 & SAINT_MAX) >> 1] = name | (p0 & p3 & SAINT_MIN); name += p0 < 0;
p1 = SA[i + 1]; SAm[(SA[i + 1] = p1 & SAINT_MAX) >> 1] = name | (p1 & p0 & SAINT_MIN); name += p1 < 0;
p2 = SA[i + 2]; SAm[(SA[i + 2] = p2 & SAINT_MAX) >> 1] = name | (p2 & p1 & SAINT_MIN); name += p2 < 0;
p3 = SA[i + 3]; SAm[(SA[i + 3] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0;
}
// Scalar tail; p2/p3 keep the carried-previous-value chain intact.
for (j += prefetch_distance + 3; i < j; i += 1)
{
p2 = p3; p3 = SA[i]; SAm[(SA[i] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0;
}
return name;
}
// Walks the upper half of SA (starting at m) and clears the sign-bit marker
// of an entry when the previous non-zero value carried it (p & (prev | SAINT_MAX)).
// Zero entries are transparent: the carried value (p3) is only replaced by
// non-zero entries, so the chain propagates across runs of zeros. Statement
// order within each lane is load-bearing.
static void libsais16_mark_distinct_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0;
// Main loop, unrolled x4; each lane consumes the previous lane's carried value.
for (i = (fast_sint_t)m + omp_block_start, j = (fast_sint_t)m + omp_block_start + omp_block_size - 3; i < j; i += 4)
{
libsais16_prefetchw(&SA[i + prefetch_distance]);
p0 = SA[i + 0]; SA[i + 0] = p0 & (p3 | SAINT_MAX); p0 = (p0 == 0) ? p3 : p0;
p1 = SA[i + 1]; SA[i + 1] = p1 & (p0 | SAINT_MAX); p1 = (p1 == 0) ? p0 : p1;
p2 = SA[i + 2]; SA[i + 2] = p2 & (p1 | SAINT_MAX); p2 = (p2 == 0) ? p1 : p2;
p3 = SA[i + 3]; SA[i + 3] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3;
}
// Scalar tail; the p2/p3 shuffle keeps the carry chain intact.
for (j += 3; i < j; i += 1)
{
p2 = p3; p3 = SA[i]; SA[i] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3;
}
}
/*
 * Clamps entries in the upper half of SA (starting at m): a negative entry
 * keeps its value with the sign-bit marker stripped (& SAINT_MAX); any
 * non-negative entry becomes 0. Processes SAm[omp_block_start ..
 * omp_block_start + omp_block_size).
 */
static void libsais16_clamp_lms_suffixes_length_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t lookahead = 32;
    sa_sint_t * RESTRICT SAm = &SA[m];

    fast_sint_t idx = omp_block_start;
    fast_sint_t unrolled_end = omp_block_start + omp_block_size - 3;

    /* Unrolled x4 main loop with write prefetch. */
    for (; idx < unrolled_end; idx += 4)
    {
        libsais16_prefetchw(&SAm[idx + lookahead]);

        sa_sint_t v0 = SAm[idx + 0]; SAm[idx + 0] = v0 < 0 ? (v0 & SAINT_MAX) : 0;
        sa_sint_t v1 = SAm[idx + 1]; SAm[idx + 1] = v1 < 0 ? (v1 & SAINT_MAX) : 0;
        sa_sint_t v2 = SAm[idx + 2]; SAm[idx + 2] = v2 < 0 ? (v2 & SAINT_MAX) : 0;
        sa_sint_t v3 = SAm[idx + 3]; SAm[idx + 3] = v3 < 0 ? (v3 & SAINT_MAX) : 0;
    }

    /* Scalar tail (up to 3 remaining elements). */
    fast_sint_t scan_end = omp_block_start + omp_block_size;
    for (; idx < scan_end; idx += 1)
    {
        sa_sint_t v = SAm[idx]; SAm[idx] = v < 0 ? (v & SAINT_MAX) : 0;
    }
}
// Parallel wrapper for the 4k distinct-name renumbering. Same prefix-count
// scheme as the 16u variant, but names start at 1, so the prefix sum seeds
// with 1 and the total is returned as name - 1.
static sa_sint_t libsais16_renumber_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t name = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned strides; the last thread takes the remainder.
fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;
if (omp_num_threads == 1)
{
name = libsais16_renumber_distinct_lms_suffixes_32s_4k(SA, m, 1, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
thread_state[omp_thread_num].state.count = libsais16_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size);
}
#pragma omp barrier
{
// Starting name for this stride = 1 + sum of earlier strides' marked counts.
fast_sint_t t, count = 1; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
if (omp_thread_num == omp_num_threads - 1)
{
name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count);
}
libsais16_renumber_distinct_lms_suffixes_32s_4k(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size);
}
}
#endif
}
return name - 1;
}
/* Parallel wrapper: partitions the upper n/2 entries of SA into 16-aligned
   per-thread slices and runs libsais16_mark_distinct_lms_suffixes_32s on each. */
static void libsais16_mark_distinct_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
/* Stride rounded down to a multiple of 16; the last thread takes the remainder. */
fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start;
#else
UNUSED(threads);
fast_sint_t omp_block_start = 0;
fast_sint_t omp_block_size = (fast_sint_t)n >> 1;
#endif
libsais16_mark_distinct_lms_suffixes_32s(SA, m, omp_block_start, omp_block_size);
}
}
/* Parallel wrapper: partitions the upper n/2 entries of SA into 16-aligned
   per-thread slices and runs libsais16_clamp_lms_suffixes_length_32s on each. */
static void libsais16_clamp_lms_suffixes_length_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
/* Stride rounded down to a multiple of 16; the last thread takes the remainder. */
fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start;
#else
UNUSED(threads);
fast_sint_t omp_block_start = 0;
fast_sint_t omp_block_size = (fast_sint_t)n >> 1;
#endif
libsais16_clamp_lms_suffixes_length_32s(SA, m, omp_block_start, omp_block_size);
}
}
/* Combined pass (4k variant): zeroes the n/2 scratch area above SA[m],
   renumbers distinct LMS suffixes, and — only when some names collide
   (name < m) — marks distinct suffixes for the recursive sort.
   Returns the number of distinct names. */
static sa_sint_t libsais16_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t));
sa_sint_t name = libsais16_renumber_distinct_lms_suffixes_32s_4k_omp(SA, m, threads, thread_state);
if (name < m)
{
libsais16_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads);
}
return name;
}
/* Combined pass (1k variant): gathers LMS positions, records each LMS
   substring's length (marked with SAINT_MIN) keyed by position/2 in the upper
   half of SA, clamps the lengths, then assigns names by comparing adjacent
   LMS substrings character-by-character (equal-length substrings that compare
   equal share a name, encoded via the sign-bit trick with pdiff/qdiff).
   Returns the number of distinct names minus one. */
static sa_sint_t libsais16_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT SAm = &SA[m];
{
/* Step 1: gather LMS suffixes into SA[n-m..n) and store each LMS
   substring's length (next LMS start - this start + 1, sign-marked)
   at SAm[position >> 1]. */
libsais16_gather_lms_suffixes_32s(T, SA, n);
memset(&SA[m], 0, ((size_t)n - (size_t)m - (size_t)m) * sizeof(sa_sint_t));
fast_sint_t i, j;
for (i = (fast_sint_t)n - (fast_sint_t)m, j = (fast_sint_t)n - 1 - prefetch_distance - 3; i < j; i += 4)
{
libsais16_prefetch(&SA[i + 2 * prefetch_distance]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]);
SAm[((sa_uint_t)SA[i + 0]) >> 1] = SA[i + 1] - SA[i + 0] + 1 + SAINT_MIN;
SAm[((sa_uint_t)SA[i + 1]) >> 1] = SA[i + 2] - SA[i + 1] + 1 + SAINT_MIN;
SAm[((sa_uint_t)SA[i + 2]) >> 1] = SA[i + 3] - SA[i + 2] + 1 + SAINT_MIN;
SAm[((sa_uint_t)SA[i + 3]) >> 1] = SA[i + 4] - SA[i + 3] + 1 + SAINT_MIN;
}
for (j += prefetch_distance + 3; i < j; i += 1)
{
SAm[((sa_uint_t)SA[i]) >> 1] = SA[i + 1] - SA[i] + 1 + SAINT_MIN;
}
/* Last LMS suffix has no successor; its substring length is 1. */
SAm[((sa_uint_t)SA[n - 1]) >> 1] = 1 + SAINT_MIN;
}
{
/* Step 2: keep lengths only for marked slots, zero the rest. */
libsais16_clamp_lms_suffixes_length_32s_omp(SA, n, m, threads);
}
sa_sint_t name = 1;
{
/* Step 3: walk sorted LMS suffixes pairwise; qdiff/pdiff become SAINT_MIN
   (negative) exactly when the adjacent substrings differ, which bumps the
   name counter and sign-marks the stored name. Two entries are processed
   per iteration so comparisons can be interleaved. */
fast_sint_t i, j, p = SA[0], plen = SAm[p >> 1]; sa_sint_t pdiff = SAINT_MIN;
for (i = 1, j = m - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetch(&SA[i + 2 * prefetch_distance]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais16_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 0])]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais16_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 1])]);
fast_sint_t q = SA[i + 0], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN;
if (plen == qlen) { fast_sint_t l = 0; do { if (T[p + l] != T[q + l]) { break; } } while (++l < qlen); qdiff = (sa_sint_t)(l - qlen) & SAINT_MIN; }
SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0);
p = SA[i + 1]; plen = SAm[p >> 1]; pdiff = SAINT_MIN;
if (qlen == plen) { fast_sint_t l = 0; do { if (T[q + l] != T[p + l]) { break; } } while (++l < plen); pdiff = (sa_sint_t)(l - plen) & SAINT_MIN; }
SAm[q >> 1] = name | (qdiff & pdiff); name += (pdiff < 0);
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
fast_sint_t q = SA[i], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN;
if (plen == qlen) { fast_sint_t l = 0; do { if (T[p + l] != T[q + l]) { break; } } while (++l < plen); qdiff = (sa_sint_t)(l - plen) & SAINT_MIN; }
SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0);
p = q; plen = qlen; pdiff = qdiff;
}
SAm[p >> 1] = name | pdiff; name++;
}
if (name <= m)
{
/* Names collide: mark distinct suffixes for the recursive sort. */
libsais16_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads);
}
return name - 1;
}
/* Maps each rank stored in SA[omp_block_start .. +omp_block_size) back to
   the original LMS suffix position via the lookup table held in the top m
   entries of SA (SA[n-m..n)). Pure gather: SA[i] = SA[n - m + SA[i]]. */
static void libsais16_reconstruct_lms_suffixes(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;
    const sa_sint_t * RESTRICT lookup = &SA[n - m];

    /* Main loop: four gathers per iteration, prefetching both the index
       stream and the dependent lookup entries. */
    fast_sint_t pos = omp_block_start;
    fast_sint_t unrolled_end = omp_block_start + omp_block_size - prefetch_distance - 3;
    while (pos < unrolled_end)
    {
        libsais16_prefetchw(&SA[pos + 2 * prefetch_distance]);
        libsais16_prefetch(&lookup[SA[pos + prefetch_distance + 0]]);
        libsais16_prefetch(&lookup[SA[pos + prefetch_distance + 1]]);
        libsais16_prefetch(&lookup[SA[pos + prefetch_distance + 2]]);
        libsais16_prefetch(&lookup[SA[pos + prefetch_distance + 3]]);
        SA[pos + 0] = lookup[SA[pos + 0]];
        SA[pos + 1] = lookup[SA[pos + 1]];
        SA[pos + 2] = lookup[SA[pos + 2]];
        SA[pos + 3] = lookup[SA[pos + 3]];
        pos += 4;
    }

    /* Tail: remaining entries without prefetch. */
    for (unrolled_end += prefetch_distance + 3; pos < unrolled_end; ++pos)
    {
        SA[pos] = lookup[SA[pos]];
    }
}
/* Parallel wrapper: partitions the first m entries of SA into 16-aligned
   per-thread slices and runs libsais16_reconstruct_lms_suffixes on each. */
static void libsais16_reconstruct_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
/* Stride rounded down to a multiple of 16; the last thread takes the remainder. */
fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;
#else
UNUSED(threads);
fast_sint_t omp_block_start = 0;
fast_sint_t omp_block_size = m;
#endif
libsais16_reconstruct_lms_suffixes(SA, n, m, omp_block_start, omp_block_size);
}
}
/* Scatters the m sorted LMS suffixes (packed at the front of SA) to the ends
   of their per-symbol buckets (16-bit alphabet variant), zero-filling the
   gaps between consecutive bucket intervals. Bucket counts come from the
   2-entry-per-symbol layout (BUCKETS_INDEX2); bucket_end holds end offsets. */
static void libsais16_place_lms_suffixes_interval_16u(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
const sa_sint_t * RESTRICT bucket_end = &buckets[7 * ALPHABET_SIZE];
fast_sint_t c, j = n;
for (c = ALPHABET_SIZE - 2; c >= 0; --c)
{
/* Number of LMS-type suffixes starting with symbol c. */
fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)];
if (l > 0)
{
fast_sint_t i = bucket_end[c];
if (j - i > 0)
{
/* Zero the gap between this bucket's end and the previous placement. */
memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
}
/* Move the next l suffixes from the packed prefix to the bucket tail. */
memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
}
}
memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}
/* Scatters the m sorted LMS suffixes to the ends of their per-symbol buckets
   (32-bit alphabet, 4k-bucket layout: bucket ends at buckets[3*k]), zeroing
   the gaps between intervals. Same scheme as the 16u variant but with
   alphabet size k. */
static void libsais16_place_lms_suffixes_interval_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k];
fast_sint_t c, j = n;
for (c = (fast_sint_t)k - 2; c >= 0; --c)
{
/* Number of LMS-type suffixes starting with symbol c. */
fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)];
if (l > 0)
{
fast_sint_t i = bucket_end[c];
if (j - i > 0)
{
/* Zero the gap between this bucket's end and the previous placement. */
memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
}
memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
}
}
memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}
/* Scatters the m sorted LMS suffixes to bucket ends (32-bit alphabet,
   2k-bucket layout where buckets[c] directly holds the interval end),
   zeroing gaps. Iterates symbols from high to low via BUCKETS_INDEX2 steps. */
static void libsais16_place_lms_suffixes_interval_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
fast_sint_t j = n;
if (k > 1)
{
fast_sint_t c;
for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0))
{
/* Number of LMS-type suffixes for this symbol. */
fast_sint_t l = (fast_sint_t)buckets[c + BUCKETS_INDEX2(1, 1)] - (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)];
if (l > 0)
{
fast_sint_t i = buckets[c];
if (j - i > 0)
{
/* Zero the gap between this bucket's end and the previous placement. */
memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
}
memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
}
}
}
memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}
/* Scatters the m sorted LMS suffixes right-to-left into their buckets using
   only a 1k bucket array (buckets[c] = bucket end for symbol c). Whenever the
   leading symbol changes, the untouched remainder of the new bucket is zeroed
   before placement begins from its end. Processes four entries per unrolled
   iteration with text prefetch. */
static void libsais16_place_lms_suffixes_interval_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t m, sa_sint_t * RESTRICT buckets)
{
const fast_sint_t prefetch_distance = 32;
/* c = current symbol, l = next free slot (filling downward) in its bucket. */
sa_sint_t c = k - 1; fast_sint_t i, l = buckets[c];
for (i = (fast_sint_t)m - 1; i >= prefetch_distance + 3; i -= 4)
{
libsais16_prefetch(&SA[i - 2 * prefetch_distance]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 0]]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 1]]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 2]]);
libsais16_prefetch(&T[SA[i - prefetch_distance - 3]]);
sa_sint_t p0 = SA[i - 0]; if (T[p0] != c) { c = T[p0]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p0;
sa_sint_t p1 = SA[i - 1]; if (T[p1] != c) { c = T[p1]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p1;
sa_sint_t p2 = SA[i - 2]; if (T[p2] != c) { c = T[p2]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p2;
sa_sint_t p3 = SA[i - 3]; if (T[p3] != c) { c = T[p3]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p3;
}
for (; i >= 0; i -= 1)
{
sa_sint_t p = SA[i]; if (T[p] != c) { c = T[p]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p;
}
/* Zero everything below the last placed suffix. */
memset(&SA[0], 0, (size_t)l * sizeof(sa_sint_t));
}
/* Scatters the m sorted LMS suffixes (packed at the front of SA) to the ends
   of their per-symbol buckets, using the 6k histogram layout: per-symbol LMS
   counts live at BUCKETS_INDEX4(c, 1) and bucket end offsets at buckets[5*k].
   Gaps between consecutive bucket intervals are zero-filled; finally the
   region below the lowest placement is zeroed. */
static void libsais16_place_lms_suffixes_histogram_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[5 * k];
    fast_sint_t symbol, tail = n;

    for (symbol = (fast_sint_t)k - 2; symbol >= 0; --symbol)
    {
        fast_sint_t lms_count = (fast_sint_t)buckets[BUCKETS_INDEX4(symbol, 1)];
        if (lms_count > 0)
        {
            fast_sint_t dest_end = bucket_end[symbol];
            if (tail > dest_end)
            {
                /* Zero the gap between this bucket and the previous placement. */
                memset(&SA[dest_end], 0, (size_t)(tail - dest_end) * sizeof(sa_sint_t));
            }
            /* Move the next lms_count suffixes from the packed prefix to the bucket tail. */
            memmove(&SA[tail = (dest_end - lms_count)], &SA[m -= (sa_sint_t)lms_count], (size_t)lms_count * sizeof(sa_sint_t));
        }
    }

    memset(&SA[0], 0, (size_t)tail * sizeof(sa_sint_t));
}
/* Scatters the m sorted LMS suffixes (packed at the front of SA) to the ends
   of their per-symbol buckets, using the 4k histogram layout: per-symbol LMS
   counts live at BUCKETS_INDEX2(c, 1) and bucket end offsets at buckets[3*k].
   Gaps between consecutive bucket intervals are zero-filled; finally the
   region below the lowest placement is zeroed. */
static void libsais16_place_lms_suffixes_histogram_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k];
    fast_sint_t symbol, tail = n;

    for (symbol = (fast_sint_t)k - 2; symbol >= 0; --symbol)
    {
        fast_sint_t lms_count = (fast_sint_t)buckets[BUCKETS_INDEX2(symbol, 1)];
        if (lms_count > 0)
        {
            fast_sint_t dest_end = bucket_end[symbol];
            if (tail > dest_end)
            {
                /* Zero the gap between this bucket and the previous placement. */
                memset(&SA[dest_end], 0, (size_t)(tail - dest_end) * sizeof(sa_sint_t));
            }
            /* Move the next lms_count suffixes from the packed prefix to the bucket tail. */
            memmove(&SA[tail = (dest_end - lms_count)], &SA[m -= (sa_sint_t)lms_count], (size_t)lms_count * sizeof(sa_sint_t));
        }
    }

    memset(&SA[0], 0, (size_t)tail * sizeof(sa_sint_t));
}
/* Scatters the m sorted LMS suffixes to bucket ends using the 2k histogram
   layout (buckets[c] = interval end, buckets[c + BUCKETS_INDEX2(0,1)] = LMS
   count), zeroing the gaps between intervals. */
static void libsais16_place_lms_suffixes_histogram_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
fast_sint_t j = n;
if (k > 1)
{
fast_sint_t c;
for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0))
{
/* LMS count for this symbol. */
fast_sint_t l = (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)];
if (l > 0)
{
fast_sint_t i = buckets[c];
if (j - i > 0)
{
/* Zero the gap between this bucket's end and the previous placement. */
memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
}
memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
}
}
}
memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}
/* Final left-to-right induction scan (BWT output variant, 16-bit alphabet).
   For each entry p > 0, induces the preceding suffix p-1 into its bucket
   (sign bit records whether T[p-2] < T[p-1], i.e. the next induction type)
   and overwrites SA[i] with the BWT symbol T[p-1] marked with SAINT_MIN.
   The conditional prefetches (s > 0 ? Ts : NULL) avoid faulting on
   non-positive entries. */
static void libsais16_final_bwt_scan_left_to_right_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
/* Same as libsais16_final_bwt_scan_left_to_right_16u, additionally recording
   auxiliary BWT indexes: whenever the induced position p is a multiple of the
   sampling rate (rm is rate-1, so (p & rm) == 0), the current bucket offset
   is stored into I[p / (rm + 1)]. */
static void libsais16_final_bwt_aux_scan_left_to_right_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]]; }}
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]]; }}
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } }
}
}
/* Final left-to-right induction scan (plain suffix-array output, 16-bit
   alphabet). Unlike the BWT variant, SA[i] keeps the suffix index (sign bit
   flipped via XOR with SAINT_MIN) instead of a BWT symbol; induced entries
   carry the next-induction-type flag in the sign bit. */
static void libsais16_final_sorting_scan_left_to_right_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
/* Final left-to-right induction scan for the 32-bit (recursive) alphabet.
   Same induction rule as the 16u sorting variant, but with a deeper two-stage
   prefetch pipeline: text at 2*distance ahead, then bucket slots and the
   comparison characters at 1*distance ahead. */
static void libsais16_final_sorting_scan_left_to_right_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 3 * prefetch_distance]);
sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais16_prefetchw(&induction_bucket[T[s2 - 1]]); libsais16_prefetch(&T[s2] - 2); }
sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais16_prefetchw(&induction_bucket[T[s3 - 1]]); libsais16_prefetch(&T[s3] - 2); }
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
for (j += 2 * prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
#if defined(_OPENMP)
/* Parallel BWT scan, phase 1 (per thread): like the sequential BWT scan, but
   instead of writing induced entries into shared buckets, records each
   induced (symbol, index) pair into the thread-local cache and counts symbols
   into a thread-local histogram. Returns the number of cached entries; they
   are placed later by the corresponding _block_place function. */
static fast_sint_t libsais16_final_bwt_scan_left_to_right_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
return count;
}
/* Parallel sorting scan, phase 1 (per thread): same caching scheme as the
   BWT block_prepare, but SA entries are toggled with XOR SAINT_MIN (suffix
   indexes kept) instead of being replaced by BWT symbols.
   Returns the number of cached (symbol, index) pairs. */
static fast_sint_t libsais16_final_sorting_scan_left_to_right_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
return count;
}
/* Parallel scan, phase 2 (per thread): drains the thread-local cache built by
   a _block_prepare call, scattering each cached index into SA at its symbol's
   bucket offset (buckets were rebased to this thread's starting offsets by
   the master thread between phases). */
static void libsais16_final_order_scan_left_to_right_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = 0, j = count - 3; i < j; i += 4)
{
libsais16_prefetch(&cache[i + prefetch_distance]);
SA[buckets[cache[i + 0].symbol]++] = cache[i + 0].index;
SA[buckets[cache[i + 1].symbol]++] = cache[i + 1].index;
SA[buckets[cache[i + 2].symbol]++] = cache[i + 2].index;
SA[buckets[cache[i + 3].symbol]++] = cache[i + 3].index;
}
for (j += 3; i < j; i += 1)
{
SA[buckets[cache[i].symbol]++] = cache[i].index;
}
}
/* Phase-2 placement for the auxiliary-index BWT variant: in addition to
   scattering cached entries into SA, records the post-increment bucket
   offset into I whenever the cached index (mark bit stripped via SAINT_MAX)
   is a multiple of the sampling rate rm + 1. */
static void libsais16_final_bwt_aux_scan_left_to_right_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = 0, j = count - 3; i < j; i += 4)
{
libsais16_prefetch(&cache[i + prefetch_distance]);
SA[buckets[cache[i + 0].symbol]++] = cache[i + 0].index; if ((cache[i + 0].index & rm) == 0) { I[(cache[i + 0].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 0].symbol]; }
SA[buckets[cache[i + 1].symbol]++] = cache[i + 1].index; if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 1].symbol]; }
SA[buckets[cache[i + 2].symbol]++] = cache[i + 2].index; if ((cache[i + 2].index & rm) == 0) { I[(cache[i + 2].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 2].symbol]; }
SA[buckets[cache[i + 3].symbol]++] = cache[i + 3].index; if ((cache[i + 3].index & rm) == 0) { I[(cache[i + 3].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 3].symbol]; }
}
for (j += 3; i < j; i += 1)
{
SA[buckets[cache[i].symbol]++] = cache[i].index; if ((cache[i].index & rm) == 0) { I[(cache[i].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol]; }
}
}
/* Parallel 32s sorting scan, phase 1 (per thread): converts each SA entry in
   the slice into a cache record — index (with the next-induction-type flag in
   the sign bit) and leading symbol, or symbol = SAINT_MIN for entries that
   induce nothing — while toggling SA entries with XOR SAINT_MIN. The cache is
   then processed sequentially by _block_sort. */
static void libsais16_final_sorting_scan_left_to_right_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
libsais16_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol;
}
}
/* Parallel 32s sorting scan, phase 2 (master thread only): sequentially
   assigns bucket positions to the gathered cache records. cache[i].symbol is
   overwritten with the destination SA index; if that destination falls inside
   the current block, the induced entry is expanded in place (a new cache
   record is written at the destination slot) so induction chains within the
   block are followed without touching SA. Records with symbol < 0 induce
   nothing and are skipped. */
static void libsais16_final_sorting_scan_left_to_right_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size;
for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&cache[i + 2 * prefetch_distance]);
sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL);
sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL);
sa_sint_t v0 = cache[i + 0].symbol;
if (v0 >= 0)
{
/* Claim the next slot in v0's bucket; expand in place if it is local. */
cache[i + 0].symbol = induction_bucket[v0]++;
if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; cache[i + 0].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
sa_sint_t v1 = cache[i + 1].symbol;
if (v1 >= 0)
{
cache[i + 1].symbol = induction_bucket[v1]++;
if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; cache[i + 1].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
cache[i].symbol = induction_bucket[v]++;
if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
}
}
/* OMP driver for one block of the final left-to-right BWT scan.
   Single thread: run the sequential scan directly. Multiple threads:
   (1) each thread prepares its slice into a thread-local cache + histogram,
   (2) the master converts per-thread histograms into starting offsets by an
       exclusive prefix sum over induction_bucket,
   (3) each thread places its cached entries. Barriers separate the phases. */
static void libsais16_final_bwt_scan_left_to_right_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Stride rounded down to a multiple of 16; the last thread takes the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais16_final_bwt_scan_left_to_right_16u(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
/* Phase 1: gather induced entries into the thread-local cache. */
thread_state[omp_thread_num].state.count = libsais16_final_bwt_scan_left_to_right_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
/* Phase 2: exclusive prefix sum — each thread's buckets become its
   starting offsets; induction_bucket accumulates the totals. */
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
/* Phase 3: scatter the cached entries into SA. */
libsais16_final_order_scan_left_to_right_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
/* OMP driver for one block of the final left-to-right BWT scan with auxiliary
   indexes. Same three-phase prepare / prefix-sum / place scheme as the plain
   BWT driver; phase 1 reuses the BWT block_prepare (aux indexes are derived
   during placement), phase 3 uses the aux-aware placement that also fills I. */
static void libsais16_final_bwt_aux_scan_left_to_right_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Stride rounded down to a multiple of 16; the last thread takes the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais16_final_bwt_aux_scan_left_to_right_16u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
/* Phase 1: gather induced entries into the thread-local cache. */
thread_state[omp_thread_num].state.count = libsais16_final_bwt_scan_left_to_right_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
/* Phase 2: exclusive prefix sum over per-thread histograms. */
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
/* Phase 3: scatter cached entries and record auxiliary indexes. */
libsais16_final_bwt_aux_scan_left_to_right_16u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
/* OMP driver for one block of the final left-to-right sorting scan (16-bit
   alphabet). Same three-phase prepare / prefix-sum / place scheme as the BWT
   driver, using the sorting variants of prepare and the shared order
   placement. */
static void libsais16_final_sorting_scan_left_to_right_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Stride rounded down to a multiple of 16; the last thread takes the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais16_final_sorting_scan_left_to_right_16u(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
/* Phase 1: gather induced entries into the thread-local cache. */
thread_state[omp_thread_num].state.count = libsais16_final_sorting_scan_left_to_right_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
/* Phase 2: exclusive prefix sum over per-thread histograms. */
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
/* Phase 3: scatter the cached entries into SA. */
libsais16_final_order_scan_left_to_right_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
// Left-to-right induction scan of one SA block for the final sorting pass
// over a 32-bit (recursion-level) alphabet, parallelized across `threads`.
// Unlike the 16u variant, all threads share one cache addressed by absolute
// SA position (hence the `cache - block_start` base): threads gather in
// parallel, the master performs the order-dependent sort serially over the
// whole cached block, then threads compact and write results back.
static void libsais16_final_sorting_scan_left_to_right_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Even split; stride rounded down to a multiple of 16, last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
// Serial fallback.
libsais16_final_sorting_scan_left_to_right_32s(T, SA, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: per-thread gather of SA entries into the shared cache.
libsais16_final_sorting_scan_left_to_right_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (master only): serial induction over the cached block.
libsais16_final_sorting_scan_left_to_right_32s_block_sort(T, buckets, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
// Phase 3: write surviving cached suffixes back into SA.
libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
// Top-level left-to-right induction for the final BWT pass (16-bit alphabet).
// Seeds the scan with suffix n-1 (high bit set when T[n-2] < T[n-1]), then
// either runs serially (single thread / small input) or walks the SA,
// carving out maximal runs of non-zero entries: a zero entry has not been
// induced yet, so a parallel block may only cover already-filled positions.
static void libsais16_final_bwt_scan_left_to_right_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1));
if (threads == 1 || n < 65536)
{
libsais16_final_bwt_scan_left_to_right_16u(T, SA, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = 0; block_start < n; )
{
if (SA[block_start] == 0)
{
// Not yet induced; skip one position and retry.
block_start++;
}
else
{
// Cap the block so all entries fit the per-thread caches.
fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;}
fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; }
fast_sint_t block_size = block_end - block_start;
if (block_size < 32)
{
// Tiny run: cheaper to induce inline than to fork. Writes the BWT
// symbol (tagged with SAINT_MIN) in place and induces predecessor p.
for (; block_start < block_end; block_start += 1)
{
sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
else
{
libsais16_final_bwt_scan_left_to_right_16u_block_omp(T, SA, induction_bucket, block_start, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
// Top-level left-to-right induction for the final BWT pass with auxiliary
// index sampling: identical blocking strategy to the non-aux variant, but
// additionally records induction_bucket positions into I at positions p
// where (p & rm) == 0.
// NOTE(review): rm appears to be (sampling rate - 1), a power-of-two mask
// — confirm against the callers that set up I/rm.
static void libsais16_final_bwt_aux_scan_left_to_right_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
// Seed with suffix n-1, sampling it if its position matches the mask.
SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1));
if ((((sa_sint_t)n - 1) & rm) == 0) { I[((sa_sint_t)n - 1) / (rm + 1)] = induction_bucket[T[(sa_sint_t)n - 1]]; }
if (threads == 1 || n < 65536)
{
libsais16_final_bwt_aux_scan_left_to_right_16u(T, SA, rm, I, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = 0; block_start < n; )
{
if (SA[block_start] == 0)
{
// Not yet induced; skip one position and retry.
block_start++;
}
else
{
// Cap the block so all entries fit the per-thread caches.
fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;}
fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; }
fast_sint_t block_size = block_end - block_start;
if (block_size < 32)
{
// Tiny run: induce inline, sampling I for mask-aligned positions.
for (; block_start < block_end; block_start += 1)
{
sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } }
}
}
else
{
libsais16_final_bwt_aux_scan_left_to_right_16u_block_omp(T, SA, rm, I, induction_bucket, block_start, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
// Top-level left-to-right induction for the final sorting pass (16-bit
// alphabet, no BWT output). Same run-splitting strategy as the BWT variant:
// seed with suffix n-1, then process maximal runs of non-zero SA entries
// either inline (tiny runs) or via the parallel block routine.
static void libsais16_final_sorting_scan_left_to_right_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1));
if (threads == 1 || n < 65536)
{
libsais16_final_sorting_scan_left_to_right_16u(T, SA, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = 0; block_start < n; )
{
if (SA[block_start] == 0)
{
// Not yet induced; skip one position and retry.
block_start++;
}
else
{
// Cap the block so all entries fit the per-thread caches.
fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;}
fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; }
fast_sint_t block_size = block_end - block_start;
if (block_size < 32)
{
// Tiny run: induce inline. p ^ SAINT_MIN toggles the high tag bit
// in place; the induced predecessor carries the type flag in bit 31.
for (; block_start < block_end; block_start += 1)
{
sa_sint_t p = SA[block_start]; SA[block_start] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
else
{
libsais16_final_sorting_scan_left_to_right_16u_block_omp(T, SA, induction_bucket, block_start, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
// Top-level left-to-right induction for the final sorting pass over a 32-bit
// alphabet. No zero-run scanning here: the cache-based block routine resolves
// intra-block dependencies itself, so the SA is simply cut into fixed-size
// blocks of threads * LIBSAIS_PER_THREAD_CACHE_SIZE entries.
static void libsais16_final_sorting_scan_left_to_right_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
// Seed the scan with suffix n-1; bit 31 flags T[n-2] < T[n-1].
SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1));
if (threads == 1 || n < 65536)
{
libsais16_final_sorting_scan_left_to_right_32s(T, SA, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < n; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; }
libsais16_final_sorting_scan_left_to_right_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, block_start, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
// Serial right-to-left induction over [omp_block_start, +omp_block_size) for
// the final BWT pass. For each entry p > 0 it writes the BWT symbol c1 = T[p-1]
// into SA[i], and places at the symbol's bucket (pre-decrement) either the
// predecessor index p-1 (when c0 <= c1, induction continues) or c0 tagged with
// SAINT_MIN (when c0 > c1). Returns the position i where p == 0 was seen (the
// primary BWT index), or -1 if not found in this range.
// Main loop is 2x unrolled with software prefetch at distance 32.
static sa_sint_t libsais16_final_bwt_scan_right_to_left_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j; sa_sint_t index = -1;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&SA[i - 2 * prefetch_distance]);
// Prefetch T around upcoming suffix positions (NULL prefetch is a no-op).
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; index = (p0 == 0) ? (sa_sint_t)(i - 0) : index;
SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint16_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; }
sa_sint_t p1 = SA[i - 1]; index = (p1 == 0) ? (sa_sint_t)(i - 1) : index;
SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint16_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; }
}
// Tail: remaining entries without prefetch/unrolling.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; index = (p == 0) ? (sa_sint_t)i : index;
SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; }
}
return index;
}
// Serial right-to-left induction for the final BWT pass with auxiliary index
// sampling: same placement logic as libsais16_final_bwt_scan_right_to_left_16u
// (no primary-index return), but whenever the induced position p satisfies
// (p & rm) == 0 it stores induction_bucket[T[p]] + 1 into I[p / (rm + 1)].
static void libsais16_final_bwt_aux_scan_right_to_left_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&SA[i - 2 * prefetch_distance]);
// Prefetch T around upcoming suffix positions (NULL prefetch is a no-op).
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0];
SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint16_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]] + 1; } }
sa_sint_t p1 = SA[i - 1];
SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint16_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]] + 1; } }
}
// Tail: remaining entries without prefetch/unrolling.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i];
SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } }
}
}
// Serial right-to-left induction for the final sorting pass (16-bit alphabet,
// no BWT output). Clears the tag bit of each entry in place, and for p > 0
// places predecessor p-1 at the pre-decremented bucket of T[p-1], with bit 31
// set when T[p-2] > T[p-1]. 2x unrolled with prefetch at distance 32.
static void libsais16_final_sorting_scan_right_to_left_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&SA[i - 2 * prefetch_distance]);
// Prefetch T around upcoming suffix positions (NULL prefetch is a no-op).
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); }
}
// Tail: remaining entries without prefetch/unrolling.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); }
}
}
// Serial right-to-left induction for the final sorting pass over a 32-bit
// alphabet. Same placement logic as the 16u variant, but with a deeper,
// two-stage prefetch pipeline: text at distance 2*32, then the target bucket
// slot and the predecessor's neighborhood at distance 32 (bucket lookups are
// data-dependent for large alphabets, unlike the 16u case).
static void libsais16_final_sorting_scan_right_to_left_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&SA[i - 3 * prefetch_distance]);
sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais16_prefetchw(&induction_bucket[T[s2 - 1]]); libsais16_prefetch(&T[s2] - 2); }
sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais16_prefetchw(&induction_bucket[T[s3 - 1]]); libsais16_prefetch(&T[s3] - 2); }
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); }
}
// Tail: remaining entries without prefetch/unrolling.
for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); }
}
}
#if defined(_OPENMP)
// Parallel-phase-1 helper: scans its sub-block right-to-left like
// libsais16_final_bwt_scan_right_to_left_16u, but instead of writing into the
// shared buckets it records each placement into the thread-local cache
// (symbol + value to place) and counts per-symbol occurrences in `buckets`.
// SA is still updated in place with the BWT symbol. Returns the number of
// cached entries.
static fast_sint_t libsais16_final_bwt_scan_right_to_left_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
// Fresh per-symbol counts for this sub-block.
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&SA[i - 2 * prefetch_distance]);
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint16_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p0 : t; }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint16_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p1 : t; }
}
// Tail: remaining entries without prefetch/unrolling.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p : t; }
}
return count;
}
// Parallel-phase-1 helper for the BWT-with-aux pass: like the non-aux prepare,
// but cache entries come in PAIRS — cache[k].index holds the value to place
// (index or tagged symbol) and cache[k+1].index holds the raw position p,
// which the matching block_place routine uses for aux-index sampling.
// Returns the number of cached slots used (always even).
static fast_sint_t libsais16_final_bwt_aux_scan_right_to_left_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
// Fresh per-symbol counts for this sub-block.
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&SA[i - 2 * prefetch_distance]);
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint16_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p0 : t; cache[count + 1].index = p0; count += 2; }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint16_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p1 : t; cache[count + 1].index = p1; count += 2; }
}
// Tail: remaining entries without prefetch/unrolling.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p : t; cache[count + 1].index = p; count += 2; }
}
return count;
}
// Parallel-phase-1 helper for the plain sorting pass: scans its sub-block
// right-to-left, caching each induced predecessor (symbol = T[p-1],
// index = p-1 with bit 31 set when T[p-2] > T[p-1]) and counting per-symbol
// occurrences in `buckets`. Returns the number of cached entries.
static fast_sint_t libsais16_final_sorting_scan_right_to_left_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
// Fresh per-symbol counts for this sub-block.
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&SA[i - 2 * prefetch_distance]);
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); }
}
// Tail: remaining entries without prefetch/unrolling.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); }
}
return count;
}
// Parallel-phase-3 helper: flushes this thread's cached entries into SA,
// writing each one at the pre-decremented per-thread bucket offset of its
// symbol. Processes the cache front-to-back, four entries at a time, with a
// software prefetch 32 entries ahead; a scalar loop drains the remainder.
static void libsais16_final_order_scan_right_to_left_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count)
{
const fast_sint_t lookahead = 32;
fast_sint_t pos = 0;
// Unrolled main loop: four placements per iteration.
while (pos + 4 <= count)
{
libsais16_prefetch(&cache[pos + lookahead]);
SA[--buckets[cache[pos + 0].symbol]] = cache[pos + 0].index;
SA[--buckets[cache[pos + 1].symbol]] = cache[pos + 1].index;
SA[--buckets[cache[pos + 2].symbol]] = cache[pos + 2].index;
SA[--buckets[cache[pos + 3].symbol]] = cache[pos + 3].index;
pos += 4;
}
// Drain the final 0..3 entries.
while (pos < count)
{
SA[--buckets[cache[pos].symbol]] = cache[pos].index;
pos += 1;
}
}
// Parallel-phase-3 helper for the BWT-with-aux pass: flushes paired cache
// entries (see the matching _block_prepare): even slots hold the value to
// place, odd slots hold the raw position used for aux sampling into I.
// Processes 4 pairs per iteration, then a pair-at-a-time remainder loop.
// NOTE(review): the tail applies `& SAINT_MAX` to the odd-slot index while
// the unrolled loop does not; prepare stores an untagged non-negative p in
// odd slots, so the mask looks redundant — confirm before relying on it.
static void libsais16_final_bwt_aux_scan_right_to_left_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = 0, j = count - 6; i < j; i += 8)
{
libsais16_prefetch(&cache[i + prefetch_distance]);
SA[--buckets[cache[i + 0].symbol]] = cache[i + 0].index; if ((cache[i + 1].index & rm) == 0) { I[cache[i + 1].index / (rm + 1)] = buckets[cache[i + 0].symbol] + 1; }
SA[--buckets[cache[i + 2].symbol]] = cache[i + 2].index; if ((cache[i + 3].index & rm) == 0) { I[cache[i + 3].index / (rm + 1)] = buckets[cache[i + 2].symbol] + 1; }
SA[--buckets[cache[i + 4].symbol]] = cache[i + 4].index; if ((cache[i + 5].index & rm) == 0) { I[cache[i + 5].index / (rm + 1)] = buckets[cache[i + 4].symbol] + 1; }
SA[--buckets[cache[i + 6].symbol]] = cache[i + 6].index; if ((cache[i + 7].index & rm) == 0) { I[cache[i + 7].index / (rm + 1)] = buckets[cache[i + 6].symbol] + 1; }
}
// Remainder: one (value, position) pair per iteration.
for (j += 6; i < j; i += 2)
{
SA[--buckets[cache[i].symbol]] = cache[i].index; if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol] + 1; }
}
}
// Parallel-phase-1 helper (32-bit alphabet): walks its sub-block left-to-right
// and for each SA entry precomputes the would-be induced predecessor into the
// shared cache (index with type flag in bit 31, symbol = T[p-1]). Entries with
// p <= 0 get symbol = SAINT_MIN as a "nothing to place" sentinel, which the
// block_sort phase skips. SA entries are untagged in place.
static void libsais16_final_sorting_scan_right_to_left_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais16_prefetchw(&SA[i + 2 * prefetch_distance]);
// Prefetch T around upcoming suffix positions (NULL prefetch is a no-op).
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL);
libsais16_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1;
}
// Tail: remaining entries without prefetch/unrolling.
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol;
}
}
// Parallel-phase-2 helper (32-bit alphabet, master thread only): serial
// right-to-left induction over the cached block. For each non-sentinel entry
// (symbol >= 0) it claims a destination by pre-decrementing the bucket, then
// OVERWRITES cache[i].symbol with that destination position. If the
// destination falls inside this block (>= omp_block_start), the induced
// predecessor is expanded immediately into the cache at that slot, chaining
// intra-block dependencies without touching SA.
static void libsais16_final_sorting_scan_right_to_left_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais16_prefetchw(&cache[i - 2 * prefetch_distance]);
// Prefetch the bucket slots of upcoming symbols (sentinels excluded).
sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL);
sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL);
sa_sint_t v0 = cache[i - 0].symbol;
if (v0 >= 0)
{
// Claim destination; symbol now stores where this entry will land.
cache[i - 0].symbol = --induction_bucket[v0];
if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; cache[i - 0].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
sa_sint_t v1 = cache[i - 1].symbol;
if (v1 >= 0)
{
cache[i - 1].symbol = --induction_bucket[v1];
if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; cache[i - 1].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
}
// Tail: remaining entries without prefetch/unrolling.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
cache[i].symbol = --induction_bucket[v];
if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
}
}
// Right-to-left induction of one SA block for the final BWT pass (16-bit
// alphabet), parallelized across `threads`. Mirror image of the
// left-to-right version: phase 2 iterates threads in REVERSE scan order and
// SUBTRACTS the per-thread counts from the shared induction_bucket (placement
// pre-decrements), giving each thread its end offsets.
static void libsais16_final_bwt_scan_right_to_left_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Even split; stride rounded down to a multiple of 16, last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
// Serial fallback (primary index is handled by the caller's path).
libsais16_final_bwt_scan_right_to_left_16u(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: gather into the thread-local cache, counting per symbol.
thread_state[omp_thread_num].state.count = libsais16_final_bwt_scan_right_to_left_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (master only): reverse-order prefix across threads; the
// shared induction_bucket retreats past all counts (A - B) and each
// thread's bucket array keeps its end offsets (A).
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
// Phase 3: place cached entries at the precomputed offsets.
libsais16_final_order_scan_right_to_left_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
// Right-to-left induction of one SA block for the final BWT pass with
// auxiliary index sampling, parallelized across `threads`. Same three-phase
// protocol as the non-aux version; prepare/place work on PAIRED cache entries
// (value to place + raw position for sampling into I).
static void libsais16_final_bwt_aux_scan_right_to_left_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Even split; stride rounded down to a multiple of 16, last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
// Serial fallback.
libsais16_final_bwt_aux_scan_right_to_left_16u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: gather paired entries into the thread-local cache.
thread_state[omp_thread_num].state.count = libsais16_final_bwt_aux_scan_right_to_left_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (master only): reverse-order prefix; shared bucket retreats
// past all counts, each thread keeps its end offsets.
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
// Phase 3: place cached entries and sample the aux index I.
libsais16_final_bwt_aux_scan_right_to_left_16u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
// Right-to-left induction of one SA block for the final sorting pass (16-bit
// alphabet), parallelized across `threads`. Three phases: per-thread
// gather+count, master reverse-order offset computation (buckets retreat),
// per-thread placement.
static void libsais16_final_sorting_scan_right_to_left_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Even split; stride rounded down to a multiple of 16, last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
// Serial fallback.
libsais16_final_sorting_scan_right_to_left_16u(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: gather into the thread-local cache, counting per symbol.
thread_state[omp_thread_num].state.count = libsais16_final_sorting_scan_right_to_left_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (master only): reverse-order prefix; shared bucket retreats
// past all counts, each thread keeps its end offsets.
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
// Phase 3: place cached entries at the precomputed offsets.
libsais16_final_order_scan_right_to_left_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
// Right-to-left induction of one SA block for the final sorting pass over a
// 32-bit alphabet. Shared-cache scheme (cache addressed by absolute SA
// position via `cache - block_start`): parallel gather, serial master sort
// with in-cache dependency chaining, then parallel compact-and-place.
static void libsais16_final_sorting_scan_right_to_left_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Even split; stride rounded down to a multiple of 16, last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
// Serial fallback.
libsais16_final_sorting_scan_right_to_left_32s(T, SA, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: per-thread gather of SA entries into the shared cache.
libsais16_final_sorting_scan_right_to_left_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (master only): serial induction over the cached block.
libsais16_final_sorting_scan_right_to_left_32s_block_sort(T, buckets, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
// Phase 3: write surviving cached suffixes back into SA.
libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
// Top-level right-to-left induction for the final BWT pass (16-bit alphabet).
// Returns the primary BWT index (position where the entry value 0 is found),
// or -1 if never seen. Serial for one thread / small inputs; otherwise scans
// the SA from n-1 downwards, carving maximal runs of non-zero entries (a zero
// entry is the not-yet-induced boundary) and dispatching large runs to the
// parallel block routine.
static sa_sint_t libsais16_final_bwt_scan_right_to_left_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t index = -1;
if (threads == 1 || n < 65536)
{
index = libsais16_final_bwt_scan_right_to_left_16u(T, SA, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = (fast_sint_t)n - 1; block_start >= 0; )
{
if (SA[block_start] == 0)
{
// Zero entry marks the primary index; record and step past it.
index = (sa_sint_t)block_start--;
}
else
{
// Cap the block so all entries fit the per-thread caches.
fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < 0) { block_max_end = -1; }
fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; }
fast_sint_t block_size = block_start - block_end;
if (block_size < 32)
{
// Tiny run: induce inline (BWT symbol into SA, predecessor placed
// at the pre-decremented bucket, tagged when c0 > c1).
for (; block_start > block_end; block_start -= 1)
{
sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; }
}
}
else
{
libsais16_final_bwt_scan_right_to_left_16u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
return index;
}
// Right-to-left BWT induction pass (16-bit alphabet) that additionally records
// auxiliary indexes: whenever (p & rm) == 0, I[p / (rm + 1)] is updated.
// rm is presumably a power-of-two-minus-one sampling mask — callers pass
// r - 1; confirm at call sites. Structure mirrors
// libsais16_final_bwt_scan_right_to_left_16u_omp, except zero entries are
// skipped rather than recorded, and the cache-size cap is halved (each cached
// step may carry the extra auxiliary write).
static void libsais16_final_bwt_aux_scan_right_to_left_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || n < 65536)
{
libsais16_final_bwt_aux_scan_right_to_left_16u(T, SA, rm, I, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = (fast_sint_t)n - 1; block_start >= 0; )
{
if (SA[block_start] == 0)
{
block_start--;
}
else
{
// Half the cache budget of the plain BWT scan; clamp to the -1 sentinel.
fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * ((LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads) / 2); if (block_max_end < 0) { block_max_end = -1; }
fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; }
fast_sint_t block_size = block_start - block_end;
if (block_size < 32)
{
// Tiny block: sequential induction identical to the BWT scan, plus the
// sampled auxiliary-index write when p lands on an (rm + 1)-aligned slot.
for (; block_start > block_end; block_start -= 1)
{
sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } }
}
}
else
{
libsais16_final_bwt_aux_scan_right_to_left_16u_block_omp(T, SA, rm, I, induction_bucket, block_end + 1, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
// Right-to-left final sorting induction (16-bit alphabet, no BWT output).
// Same blocking strategy as the BWT variants: sequential for small inputs,
// otherwise blocks delimited by zero entries in SA, small blocks induced
// inline and large blocks handed to the parallel block routine.
static void libsais16_final_sorting_scan_right_to_left_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || n < 65536)
{
libsais16_final_sorting_scan_right_to_left_16u(T, SA, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = (fast_sint_t)n - 1; block_start >= 0; )
{
if (SA[block_start] == 0)
{
block_start--;
}
else
{
// NOTE(review): clamp uses "< -1" here where the sibling scans use "< 0";
// the two are equivalent since the only differing case (block_max_end == -1)
// already holds the sentinel value.
fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < -1) { block_max_end = -1; }
fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; }
fast_sint_t block_size = block_start - block_end;
if (block_size < 32)
{
// Tiny block: clear the sign bit and push the predecessor suffix into its
// bucket, sign-marked when the preceding character is larger.
for (; block_start > block_end; block_start -= 1)
{
sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); }
}
}
else
{
libsais16_final_sorting_scan_right_to_left_16u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
// Right-to-left final sorting induction for the 32-bit-alphabet path.
// Unlike the 16u variants there is no zero-entry block splitting: the scan is
// simply chopped into fixed-size chunks bounded by the aggregate per-thread
// cache capacity, each handled by the parallel block routine.
static void libsais16_final_sorting_scan_right_to_left_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || n < 65536)
{
libsais16_final_sorting_scan_right_to_left_32s(T, SA, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start, block_end;
for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end)
{
// Chunk size is the combined cache capacity of all threads; -1 sentinel
// lets the last chunk end exactly at position 0.
block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; }
libsais16_final_sorting_scan_right_to_left_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
// Zeroes, for every symbol c in [0, k), the SA range
// [bucket_start[c], bucket_end[c]). Buckets are independent, so the loop is
// trivially parallel; schedule(static, 1) interleaves symbols across threads.
static void libsais16_clear_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT bucket_start, sa_sint_t * RESTRICT bucket_end, sa_sint_t threads)
{
fast_sint_t c;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536)
#else
UNUSED(threads); UNUSED(n);
#endif
for (c = 0; c < k; ++c)
{
fast_sint_t range_begin = bucket_start[c];
fast_sint_t range_end = bucket_end[c];
if (range_end > range_begin)
{
memset(&SA[range_begin], 0, (size_t)(range_end - range_begin) * sizeof(sa_sint_t));
}
}
}
// Runs the final two induction passes for the 16-bit-alphabet path and
// returns the BWT primary index when computing a plain BWT, 0 otherwise.
// Three modes: suffix array only (!bwt), BWT with auxiliary indexes
// (I != NULL), or plain BWT. All modes share the same bucket layout: the
// left-to-right pass uses the slice at offset 6 * ALPHABET_SIZE, the
// right-to-left pass the slice at offset 7 * ALPHABET_SIZE. Between the two
// passes, leftover LMS entries are cleared — but only on the parallel path
// (the sequential scans handle this themselves).
static sa_sint_t libsais16_induce_final_order_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t * ltr_buckets = &buckets[6 * ALPHABET_SIZE];
sa_sint_t * rtl_buckets = &buckets[7 * ALPHABET_SIZE];
sa_sint_t needs_clear = threads > 1 && n >= 65536;
if (!bwt)
{
libsais16_final_sorting_scan_left_to_right_16u_omp(T, SA, n, ltr_buckets, threads, thread_state);
if (needs_clear) { libsais16_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, ltr_buckets, rtl_buckets, threads); }
libsais16_final_sorting_scan_right_to_left_16u_omp(T, SA, n, rtl_buckets, threads, thread_state);
return 0;
}
if (I != NULL)
{
libsais16_final_bwt_aux_scan_left_to_right_16u_omp(T, SA, n, r - 1, I, ltr_buckets, threads, thread_state);
if (needs_clear) { libsais16_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, ltr_buckets, rtl_buckets, threads); }
libsais16_final_bwt_aux_scan_right_to_left_16u_omp(T, SA, n, r - 1, I, rtl_buckets, threads, thread_state);
return 0;
}
libsais16_final_bwt_scan_left_to_right_16u_omp(T, SA, n, ltr_buckets, threads, thread_state);
if (needs_clear) { libsais16_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, ltr_buckets, rtl_buckets, threads); }
return libsais16_final_bwt_scan_right_to_left_16u_omp(T, SA, n, rtl_buckets, threads, thread_state);
}
// Final induction for the 6k-bucket 32-bit path: a left-to-right pass over
// the bucket slice at offset 4 * k, then a right-to-left pass over the slice
// at offset 5 * k.
static void libsais16_induce_final_order_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t * ltr_slice = &buckets[4 * k];
sa_sint_t * rtl_slice = &buckets[5 * k];
libsais16_final_sorting_scan_left_to_right_32s_omp(T, SA, n, ltr_slice, threads, thread_state);
libsais16_final_sorting_scan_right_to_left_32s_omp(T, SA, n, rtl_slice, threads, thread_state);
}
// Final induction for the 4k-bucket 32-bit path: a left-to-right pass over
// the bucket slice at offset 2 * k, then a right-to-left pass over the slice
// at offset 3 * k.
static void libsais16_induce_final_order_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t * ltr_slice = &buckets[2 * k];
sa_sint_t * rtl_slice = &buckets[3 * k];
libsais16_final_sorting_scan_left_to_right_32s_omp(T, SA, n, ltr_slice, threads, thread_state);
libsais16_final_sorting_scan_right_to_left_32s_omp(T, SA, n, rtl_slice, threads, thread_state);
}
// Final induction for the 2k-bucket 32-bit path: a left-to-right pass over
// the bucket slice at offset 1 * k, then a right-to-left pass over the slice
// at offset 0.
static void libsais16_induce_final_order_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t * ltr_slice = &buckets[1 * k];
sa_sint_t * rtl_slice = &buckets[0 * k];
libsais16_final_sorting_scan_left_to_right_32s_omp(T, SA, n, ltr_slice, threads, thread_state);
libsais16_final_sorting_scan_right_to_left_32s_omp(T, SA, n, rtl_slice, threads, thread_state);
}
// Final induction for the minimal 1k-bucket path. Only a single k-sized
// bucket array is available, so the symbol histogram is recounted from T and
// re-initialized (start offsets, then end offsets) before each of the two
// induction passes.
static void libsais16_induce_final_order_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
libsais16_count_suffixes_32s(T, n, k, buckets);
libsais16_initialize_buckets_start_32s_1k(k, buckets);
libsais16_final_sorting_scan_left_to_right_32s_omp(T, SA, n, buckets, threads, thread_state);
// The left-to-right pass consumed the bucket counters; rebuild them for the
// right-to-left pass.
libsais16_count_suffixes_32s(T, n, k, buckets);
libsais16_initialize_buckets_end_32s_1k(k, buckets);
libsais16_final_sorting_scan_right_to_left_32s_omp(T, SA, n, buckets, threads, thread_state);
}
// Renumbers LMS-suffix names stored in the upper half of SA (SAm = &SA[m]).
// For each suffix position p = SA[i] in [omp_block_start, +omp_block_size):
// if SAm[p >> 1] is negative (appears to mark a not-yet-renumbered / unique
// entry — confirm against the marking pass), the sign bit of T[p] is set,
// the running unique counter f is bumped, and a fresh sign-marked name is
// assigned; the slot is then rebased by -f. f is both an input (prefix count
// from preceding blocks) and the return value (running total).
// The main loop is unrolled by 4 with software prefetch; the second loop
// handles the remainder.
static sa_sint_t libsais16_renumber_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t f, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT SAm = &SA[m];
sa_sint_t i, j;
for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 2 * (sa_sint_t)prefetch_distance - 3; i < j; i += 4)
{
libsais16_prefetch(&SA[i + 3 * prefetch_distance]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 0]) >> 1]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 1]) >> 1]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 2]) >> 1]);
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 3]) >> 1]);
// Conditional prefetch of T[q]: only the negative (about-to-be-marked)
// slots will actually be written.
sa_uint_t q0 = (sa_uint_t)SA[i + prefetch_distance + 0]; const sa_sint_t * Tq0 = &T[q0]; libsais16_prefetchw(SAm[q0 >> 1] < 0 ? Tq0 : NULL);
sa_uint_t q1 = (sa_uint_t)SA[i + prefetch_distance + 1]; const sa_sint_t * Tq1 = &T[q1]; libsais16_prefetchw(SAm[q1 >> 1] < 0 ? Tq1 : NULL);
sa_uint_t q2 = (sa_uint_t)SA[i + prefetch_distance + 2]; const sa_sint_t * Tq2 = &T[q2]; libsais16_prefetchw(SAm[q2 >> 1] < 0 ? Tq2 : NULL);
sa_uint_t q3 = (sa_uint_t)SA[i + prefetch_distance + 3]; const sa_sint_t * Tq3 = &T[q3]; libsais16_prefetchw(SAm[q3 >> 1] < 0 ? Tq3 : NULL);
sa_uint_t p0 = (sa_uint_t)SA[i + 0]; sa_sint_t s0 = SAm[p0 >> 1]; if (s0 < 0) { T[p0] |= SAINT_MIN; f++; s0 = i + 0 + SAINT_MIN + f; } SAm[p0 >> 1] = s0 - f;
sa_uint_t p1 = (sa_uint_t)SA[i + 1]; sa_sint_t s1 = SAm[p1 >> 1]; if (s1 < 0) { T[p1] |= SAINT_MIN; f++; s1 = i + 1 + SAINT_MIN + f; } SAm[p1 >> 1] = s1 - f;
sa_uint_t p2 = (sa_uint_t)SA[i + 2]; sa_sint_t s2 = SAm[p2 >> 1]; if (s2 < 0) { T[p2] |= SAINT_MIN; f++; s2 = i + 2 + SAINT_MIN + f; } SAm[p2 >> 1] = s2 - f;
sa_uint_t p3 = (sa_uint_t)SA[i + 3]; sa_sint_t s3 = SAm[p3 >> 1]; if (s3 < 0) { T[p3] |= SAINT_MIN; f++; s3 = i + 3 + SAINT_MIN + f; } SAm[p3 >> 1] = s3 - f;
}
// Remainder loop (no prefetch, no unrolling).
for (j += 2 * (sa_sint_t)prefetch_distance + 3; i < j; i += 1)
{
sa_uint_t p = (sa_uint_t)SA[i]; sa_sint_t s = SAm[p >> 1]; if (s < 0) { T[p] |= SAINT_MIN; f++; s = i + SAINT_MIN + f; } SAm[p >> 1] = s - f;
}
return f;
}
// Branchless two-way compaction over SA[m + omp_block_start ... ), scanning
// right to left. For each entry p: the sign-cleared value is written at the
// left cursor l, which advances only when p was negative (sign-marked);
// p - 1 is written at the right cursor r, which advances only when p was
// positive. Both cursors move downward through the same array (SAl and SAr
// alias SA). *pl / *pr carry the cursors in and out so multi-threaded callers
// can stitch per-block results together.
static void libsais16_compact_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t * pl, fast_sint_t * pr, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT SAl = &SA[0];
sa_sint_t * RESTRICT SAr = &SA[0];
fast_sint_t i, j, l = *pl - 1, r = *pr - 1;
// Main loop unrolled by 4; conditional cursor advances are compiled as
// branchless compares (l -= p < 0, r -= p > 0).
for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start + 3; i >= j; i -= 4)
{
libsais16_prefetch(&SA[i - prefetch_distance]);
sa_sint_t p0 = SA[i - 0]; SAl[l] = p0 & SAINT_MAX; l -= p0 < 0; SAr[r] = p0 - 1; r -= p0 > 0;
sa_sint_t p1 = SA[i - 1]; SAl[l] = p1 & SAINT_MAX; l -= p1 < 0; SAr[r] = p1 - 1; r -= p1 > 0;
sa_sint_t p2 = SA[i - 2]; SAl[l] = p2 & SAINT_MAX; l -= p2 < 0; SAr[r] = p2 - 1; r -= p2 > 0;
sa_sint_t p3 = SA[i - 3]; SAl[l] = p3 & SAINT_MAX; l -= p3 < 0; SAr[r] = p3 - 1; r -= p3 > 0;
}
// Remainder loop.
for (j -= 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SAl[l] = p & SAINT_MAX; l -= p < 0; SAr[r] = p - 1; r -= p > 0;
}
*pl = l + 1; *pr = r + 1;
}
#if defined(_OPENMP)
static sa_sint_t libsais16_count_unique_suffixes(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT SAm = &SA[m];
fast_sint_t i, j; sa_sint_t f0 = 0, f1 = 0, f2 = 0, f3 = 0;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais16_prefetch(&SA[i + 2 * prefetch_distance]);
libsais16_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]);
libsais16_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]);
libsais16_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]);
libsais16_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]);
f0 += SAm[((sa_uint_t)SA[i + 0]) >> 1] < 0;
f1 += SAm[((sa_uint_t)SA[i + 1]) >> 1] < 0;
f2 += SAm[((sa_uint_t)SA[i + 2]) >> 1] < 0;
f3 += SAm[((sa_uint_t)SA[i + 3]) >> 1] < 0;
}
for (j += prefetch_distance + 3; i < j; i += 1)
{
f0 += SAm[((sa_uint_t)SA[i]) >> 1] < 0;
}
return f0 + f1 + f2 + f3;
}
#endif
// OpenMP wrapper around the LMS renumbering pass. Returns f, the total count
// of sign-marked ("unique") suffixes. Single-threaded path runs the pass
// directly with prefix 0. Multi-threaded path first has every thread count
// the marked entries in its stride, then (after a barrier) each thread
// renumbers its stride seeded with the sum of the counts of all lower-numbered
// threads; the last thread also produces the global total.
static sa_sint_t libsais16_renumber_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t f = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned per-thread strides; last thread takes the remainder.
fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;
if (omp_num_threads == 1)
{
f = libsais16_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, 0, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: per-stride count of sign-marked slots.
thread_state[omp_thread_num].state.count = libsais16_count_unique_suffixes(SA, m, omp_block_start, omp_block_size);
}
#pragma omp barrier
{
// Phase 2: exclusive prefix sum over lower thread counts seeds this
// stride's renumbering.
fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
if (omp_thread_num == omp_num_threads - 1)
{
f = (sa_sint_t)(count + thread_state[omp_thread_num].state.count);
}
libsais16_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, (sa_sint_t)count, omp_block_start, omp_block_size);
}
}
#endif
}
return f;
}
// OpenMP wrapper around the two-way LMS compaction. Single-threaded path runs
// the pass with cursors at m (left stream) and n + fs (right stream). In the
// parallel path every thread compacts its stride into private, non-overlapping
// staging regions (cursors seeded from its stride's end offsets), then after a
// barrier the master thread packs the per-thread results contiguously — the
// left streams toward position m and the right streams toward n + fs, both
// walked from the highest thread downward. Finally the f sign-marked entries
// ending at position m are copied to the tail of the free space.
static void libsais16_compact_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072 && m < fs)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Strides partition the n/2 name slots, 16-aligned; last thread takes the
// remainder.
fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start;
if (omp_num_threads == 1)
{
fast_sint_t l = m, r = (fast_sint_t)n + (fast_sint_t)fs;
libsais16_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &l, &r, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: compact into per-thread staging regions. position/count hold
// the left/right cursors (they end up just past the last written entry).
thread_state[omp_thread_num].state.position = (fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_start + omp_block_size;
thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size;
libsais16_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &thread_state[omp_thread_num].state.position, &thread_state[omp_thread_num].state.count, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2: pack per-thread left streams so they end exactly at position m.
fast_sint_t t, position;
for (position = m, t = omp_num_threads - 1; t >= 0; --t)
{
fast_sint_t omp_block_end = t < omp_num_threads - 1 ? omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1);
fast_sint_t count = ((fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_end - thread_state[t].state.position);
if (count > 0)
{
position -= count; memcpy(&SA[position], &SA[thread_state[t].state.position], (size_t)count * sizeof(sa_sint_t));
}
}
// Pack per-thread right streams so they end exactly at position n + fs.
for (position = (fast_sint_t)n + (fast_sint_t)fs, t = omp_num_threads - 1; t >= 0; --t)
{
fast_sint_t omp_block_end = t < omp_num_threads - 1 ? omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1);
fast_sint_t count = ((fast_sint_t)m + omp_block_end - thread_state[t].state.count);
if (count > 0)
{
position -= count; memcpy(&SA[position], &SA[thread_state[t].state.count], (size_t)count * sizeof(sa_sint_t));
}
}
}
}
#endif
}
// Move the f left-stream entries (ending at m) to the tail of the free space.
memcpy(&SA[(fast_sint_t)n + (fast_sint_t)fs - (fast_sint_t)m], &SA[(fast_sint_t)m - (fast_sint_t)f], (size_t)f * sizeof(sa_sint_t));
}
// Renumbers the LMS suffixes in SA and then compacts them, forwarding the
// count produced by the renumbering step into the compaction. Returns that
// count (f — apparently the number of sign-marked/unique suffixes; see the
// renumbering pass).
static sa_sint_t libsais16_compact_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t unique_count = libsais16_renumber_unique_and_nonunique_lms_suffixes_32s_omp(T, SA, m, threads, thread_state);
libsais16_compact_unique_and_nonunique_lms_suffixes_32s_omp(SA, n, m, fs, unique_count, threads, thread_state);
return unique_count;
}
// Scatters the unique LMS suffixes back into SA. SAnm streams stored target
// indexes from SA[n - m - 1 + l] onward; scanning T over the stride, every
// sign-marked character T[i] < 0 is cleared and position i is written to
// SA[tmp], then the next target index is fetched.
// Subtle: inside the unrolled body, a hit does "i++" in addition to the
// loop's "i += 4", so consumed positions are skipped; the -6 margin on j
// keeps the unrolled reads in bounds despite that extra advance.
static void libsais16_merge_unique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l];
sa_sint_t i, j; fast_sint_t tmp = *SAnm++;
for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 6; i < j; i += 4)
{
libsais16_prefetch(&T[i + prefetch_distance]);
sa_sint_t c0 = T[i + 0]; if (c0 < 0) { T[i + 0] = c0 & SAINT_MAX; SA[tmp] = i + 0; i++; tmp = *SAnm++; }
sa_sint_t c1 = T[i + 1]; if (c1 < 0) { T[i + 1] = c1 & SAINT_MAX; SA[tmp] = i + 1; i++; tmp = *SAnm++; }
sa_sint_t c2 = T[i + 2]; if (c2 < 0) { T[i + 2] = c2 & SAINT_MAX; SA[tmp] = i + 2; i++; tmp = *SAnm++; }
sa_sint_t c3 = T[i + 3]; if (c3 < 0) { T[i + 3] = c3 & SAINT_MAX; SA[tmp] = i + 3; i++; tmp = *SAnm++; }
}
// Remainder loop; same conditional extra advance on a hit.
for (j += 6; i < j; i += 1)
{
sa_sint_t c = T[i]; if (c < 0) { T[i] = c & SAINT_MAX; SA[tmp] = i; i++; tmp = *SAnm++; }
}
}
// Fills the zero holes in SA[omp_block_start .. +omp_block_size) with values
// streamed from SAnm (stored starting at SA[n - m - 1 + l]). Each zero entry
// consumes the current streamed value and fetches the next one. Unrolled by 4
// with prefetch; remainder handled by the trailing loop.
static void libsais16_merge_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l];
fast_sint_t i, j; sa_sint_t tmp = *SAnm++;
for (i = omp_block_start, j = omp_block_start + omp_block_size - 3; i < j; i += 4)
{
libsais16_prefetch(&SA[i + prefetch_distance]);
if (SA[i + 0] == 0) { SA[i + 0] = tmp; tmp = *SAnm++; }
if (SA[i + 1] == 0) { SA[i + 1] = tmp; tmp = *SAnm++; }
if (SA[i + 2] == 0) { SA[i + 2] = tmp; tmp = *SAnm++; }
if (SA[i + 3] == 0) { SA[i + 3] = tmp; tmp = *SAnm++; }
}
for (j += 3; i < j; i += 1)
{
if (SA[i] == 0) { SA[i] = tmp; tmp = *SAnm++; }
}
}
// OpenMP wrapper for the unique-LMS merge. Single-threaded path merges the
// whole range with stream offset 0. Parallel path: each thread counts the
// sign-marked characters in its stride of T, then (after a barrier) merges
// its stride with a stream offset equal to the sum of counts of all
// lower-numbered threads, so the per-thread streams line up contiguously.
static void libsais16_merge_unique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned strides over T; last thread takes the remainder.
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
libsais16_merge_unique_lms_suffixes_32s(T, SA, n, m, 0, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: count marked suffixes in this stride.
thread_state[omp_thread_num].state.count = libsais16_count_negative_marked_suffixes(T, omp_block_start, omp_block_size);
}
#pragma omp barrier
{
// Phase 2: exclusive prefix sum gives this stride's stream offset.
fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
libsais16_merge_unique_lms_suffixes_32s(T, SA, n, m, count, omp_block_start, omp_block_size);
}
}
#endif
}
}
// OpenMP wrapper for the non-unique-LMS merge. Single-threaded path merges the
// whole range with stream offset f (the unique-suffix count consumed first).
// Parallel path: each thread counts the zero holes in its stride of SA, then
// (after a barrier) merges with a stream offset of f plus the hole counts of
// all lower-numbered threads.
static void libsais16_merge_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned strides over the first m entries; last thread takes the rest.
fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;
if (omp_num_threads == 1)
{
libsais16_merge_nonunique_lms_suffixes_32s(SA, n, m, f, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: count zero holes in this stride.
thread_state[omp_thread_num].state.count = libsais16_count_zero_marked_suffixes(SA, omp_block_start, omp_block_size);
}
#pragma omp barrier
{
// Phase 2: offset = f + holes in all lower strides.
fast_sint_t t, count = f; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
libsais16_merge_nonunique_lms_suffixes_32s(SA, n, m, count, omp_block_start, omp_block_size);
}
}
#endif
}
}
// Merges compacted LMS suffixes back into SA: first the unique entries
// (scattered via sign marks in T), then the non-unique ones (filled into the
// remaining zero holes). Order matters — the second pass consumes the holes
// the first pass leaves behind.
static void libsais16_merge_compacted_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
libsais16_merge_unique_lms_suffixes_32s_omp(T, SA, n, m, threads, thread_state);
libsais16_merge_nonunique_lms_suffixes_32s_omp(SA, n, m, f, threads, thread_state);
}
// Rebuilds the LMS suffix order after the recursive call, 2k-bucket variant.
// With f > 0 (some suffixes were compacted away): restore the f compacted
// entries next to the reduced problem, re-gather the remaining compacted LMS
// suffixes (also rebuilding the bucket counts), reconstruct their text
// positions, stage them after the unique ones, clear the first m slots, and
// merge both groups back into SA. With f == 0 the simple gather/reconstruct
// path suffices.
static void libsais16_reconstruct_compacted_lms_suffixes_32s_2k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (f > 0)
{
memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t));
libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state);
libsais16_reconstruct_lms_suffixes_omp(SA, n, m - f, threads);
memcpy(&SA[n - m - 1 + f], &SA[0], ((size_t)m - (size_t)f) * sizeof(sa_sint_t));
memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t));
libsais16_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state);
}
else
{
libsais16_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n);
libsais16_reconstruct_lms_suffixes_omp(SA, n, m, threads);
}
}
// Rebuilds the LMS suffix order after the recursive call, 1k variant.
// Identical structure to the 2k variant, but gathers without maintaining
// bucket counts (no bucket array is available on this path).
static void libsais16_reconstruct_compacted_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (f > 0)
{
memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t));
libsais16_gather_compacted_lms_suffixes_32s(T, SA, n);
libsais16_reconstruct_lms_suffixes_omp(SA, n, m - f, threads);
memcpy(&SA[n - m - 1 + f], &SA[0], ((size_t)m - (size_t)f) * sizeof(sa_sint_t));
memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t));
libsais16_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state);
}
else
{
libsais16_gather_lms_suffixes_32s(T, SA, n);
libsais16_reconstruct_lms_suffixes_omp(SA, n, m, threads);
}
}
// Recursive core of the suffix-array construction for reduced (32-bit
// alphabet) problems. T is the text of length n over alphabet [0, k); SA has
// n + fs slots (fs = free space). Returns 0 on success, -2 on allocation
// failure. One of four strategies is chosen by how many k-sized bucket arrays
// fit in the free space (6k, 4k, 2k, or 1k), trading memory for fewer
// recount passes. Each strategy: gather and radix-sort the LMS suffixes,
// induce a partial order, renumber LMS substrings; if names are not yet
// unique (names < m), compact the unresolved suffixes, recurse on the reduced
// problem, and reconstruct; finally place the sorted LMS suffixes and run the
// two final induction passes.
static sa_sint_t libsais16_main_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
// Cap free space so n + fs never exceeds SAINT_MAX.
fs = fs < (SAINT_MAX - n) ? fs : (SAINT_MAX - n);
if (k > 0 && fs / k >= 6)
{
// 6k strategy: buckets live at the end of the free space, aligned to 1024
// entries when there is room, otherwise to 16.
sa_sint_t alignment = (fs - 1024) / k >= 6 ? 1024 : 16;
sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 6 ? (sa_sint_t *)libsais16_align_up(&SA[n + fs - 6 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 6 * k];
sa_sint_t m = libsais16_count_and_gather_lms_suffixes_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state);
if (m > 1)
{
memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t));
sa_sint_t first_lms_suffix = SA[n - m];
sa_sint_t left_suffixes_count = libsais16_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(T, k, buckets, first_lms_suffix);
libsais16_radix_sort_lms_suffixes_32s_6k_omp(T, SA, n, m, &buckets[4 * k], threads, thread_state);
libsais16_radix_sort_set_markers_32s_6k_omp(SA, k, &buckets[4 * k], threads);
// Parallel radix sort may leave stale entries behind; clear them.
if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); }
libsais16_initialize_buckets_for_partial_sorting_32s_6k(T, k, buckets, first_lms_suffix, left_suffixes_count);
libsais16_induce_partial_order_32s_6k_omp(T, SA, n, k, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state);
sa_sint_t names = libsais16_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state);
if (names < m)
{
// Names are not yet distinct: compact, recurse on the reduced string of
// length m - f with alphabet size names - f, then reconstruct.
sa_sint_t f = libsais16_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state);
if (libsais16_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0)
{
return -2;
}
libsais16_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state);
}
else
{
libsais16_count_lms_suffixes_32s_2k(T, n, k, buckets);
}
// Note: this branch finishes with the 4k placement/induction even though
// 6k buckets were allocated.
libsais16_initialize_buckets_start_and_end_32s_4k(k, buckets);
libsais16_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets);
libsais16_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state);
}
else
{
// m <= 1: at most one LMS suffix; place it and induce directly.
SA[0] = SA[n - 1];
libsais16_initialize_buckets_start_and_end_32s_6k(k, buckets);
libsais16_place_lms_suffixes_histogram_32s_6k(SA, n, k, m, buckets);
libsais16_induce_final_order_32s_6k(T, SA, n, k, buckets, threads, thread_state);
}
return 0;
}
else if (k > 0 && fs / k >= 4)
{
// 4k strategy.
sa_sint_t alignment = (fs - 1024) / k >= 4 ? 1024 : 16;
sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 4 ? (sa_sint_t *)libsais16_align_up(&SA[n + fs - 4 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 4 * k];
sa_sint_t m = libsais16_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state);
if (m > 1)
{
libsais16_initialize_buckets_for_radix_and_partial_sorting_32s_4k(T, k, buckets, SA[n - m]);
libsais16_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state);
libsais16_radix_sort_set_markers_32s_4k_omp(SA, k, &buckets[1], threads);
libsais16_place_lms_suffixes_interval_32s_4k(SA, n, k, m - 1, buckets);
libsais16_induce_partial_order_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state);
sa_sint_t names = libsais16_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state);
if (names < m)
{
sa_sint_t f = libsais16_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state);
if (libsais16_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0)
{
return -2;
}
libsais16_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state);
}
else
{
libsais16_count_lms_suffixes_32s_2k(T, n, k, buckets);
}
}
else
{
SA[0] = SA[n - 1];
}
libsais16_initialize_buckets_start_and_end_32s_4k(k, buckets);
libsais16_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets);
libsais16_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state);
return 0;
}
else if (k > 0 && fs / k >= 2)
{
// 2k strategy.
sa_sint_t alignment = (fs - 1024) / k >= 2 ? 1024 : 16;
sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 2 ? (sa_sint_t *)libsais16_align_up(&SA[n + fs - 2 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 2 * k];
sa_sint_t m = libsais16_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state);
if (m > 1)
{
libsais16_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(T, k, buckets, SA[n - m]);
libsais16_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state);
libsais16_place_lms_suffixes_interval_32s_2k(SA, n, k, m - 1, buckets);
libsais16_initialize_buckets_start_and_end_32s_2k(k, buckets);
libsais16_induce_partial_order_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state);
sa_sint_t names = libsais16_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads);
if (names < m)
{
sa_sint_t f = libsais16_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state);
if (libsais16_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0)
{
return -2;
}
libsais16_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state);
}
else
{
libsais16_count_lms_suffixes_32s_2k(T, n, k, buckets);
}
}
else
{
SA[0] = SA[n - 1];
}
libsais16_initialize_buckets_end_32s_2k(k, buckets);
libsais16_place_lms_suffixes_histogram_32s_2k(SA, n, k, m, buckets);
libsais16_initialize_buckets_start_and_end_32s_2k(k, buckets);
libsais16_induce_final_order_32s_2k(T, SA, n, k, buckets, threads, thread_state);
return 0;
}
else
{
// 1k strategy: not even 2k slots free. If fewer than k slots are free a
// heap-allocated bucket buffer is used instead.
sa_sint_t * buffer = fs < k ? (sa_sint_t *)libsais16_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096) : (sa_sint_t *)NULL;
sa_sint_t alignment = fs - 1024 >= k ? 1024 : 16;
sa_sint_t * RESTRICT buckets = fs - alignment >= k ? (sa_sint_t *)libsais16_align_up(&SA[n + fs - k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : fs >= k ? &SA[n + fs - k] : buffer;
if (buckets == NULL) { return -2; }
memset(SA, 0, (size_t)n * sizeof(sa_sint_t));
libsais16_count_suffixes_32s(T, n, k, buckets);
libsais16_initialize_buckets_end_32s_1k(k, buckets);
sa_sint_t m = libsais16_radix_sort_lms_suffixes_32s_1k(T, SA, n, buckets);
if (m > 1)
{
libsais16_induce_partial_order_32s_1k_omp(T, SA, n, k, buckets, threads, thread_state);
sa_sint_t names = libsais16_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads);
if (names < m)
{
// Release the heap buffer before recursing (the recursion reuses SA's
// free space), and reallocate it afterwards if it is still needed.
if (buffer != NULL) { libsais16_free_aligned(buffer); buckets = NULL; }
sa_sint_t f = libsais16_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state);
if (libsais16_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0)
{
return -2;
}
libsais16_reconstruct_compacted_lms_suffixes_32s_1k_omp(T, SA, n, m, fs, f, threads, thread_state);
if (buckets == NULL) { buckets = buffer = (sa_sint_t *)libsais16_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096); }
if (buckets == NULL) { return -2; }
}
libsais16_count_suffixes_32s(T, n, k, buckets);
libsais16_initialize_buckets_end_32s_1k(k, buckets);
libsais16_place_lms_suffixes_interval_32s_1k(T, SA, k, m, buckets);
}
libsais16_induce_final_order_32s_1k(T, SA, n, k, buckets, threads, thread_state);
libsais16_free_aligned(buffer);
return 0;
}
}
/* Top-level SA-IS pass for a 16-bit alphabet string T[0..n-1]. SA receives
   the suffix array (or induced BWT order when bwt != 0, with auxiliary
   indexes written through I at rate r). buckets is caller-provided scratch;
   fs is extra free space available past SA[n]. Returns the value of
   libsais16_induce_final_order_16u_omp, or -2 if the recursive call fails. */
static sa_sint_t libsais16_main_16u(const uint16_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t fs, sa_sint_t * freq, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
/* Clamp the reported free space so n + fs cannot exceed SAINT_MAX. */
fs = fs < (SAINT_MAX - n) ? fs : (SAINT_MAX - n);
/* Count LMS suffixes (m) and gather them at the tail of SA. */
sa_sint_t m = libsais16_count_and_gather_lms_suffixes_16u_omp(T, SA, n, buckets, threads, thread_state);
libsais16_initialize_buckets_start_and_end_16u(buckets, freq);
if (m > 0)
{
sa_sint_t first_lms_suffix = SA[n - m];
sa_sint_t left_suffixes_count = libsais16_initialize_buckets_for_lms_suffixes_radix_sort_16u(T, buckets, first_lms_suffix);
/* The multi-threaded radix sort expects the gap region of SA zeroed. */
if (threads > 1 && n >= 65536) { memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t)); }
libsais16_radix_sort_lms_suffixes_16u_omp(T, SA, n, m, buckets, threads, thread_state);
if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); }
libsais16_initialize_buckets_for_partial_sorting_16u(T, buckets, first_lms_suffix, left_suffixes_count);
libsais16_induce_partial_order_16u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state);
sa_sint_t names = libsais16_renumber_and_gather_lms_suffixes_16u_omp(SA, n, m, fs, threads, thread_state);
if (names < m)
{
/* Not all LMS-substrings are distinct: recurse on the reduced
   (renamed) problem of size m, then map the order back. */
if (libsais16_main_32s(SA + n + fs - m, SA, m, names, fs + n - 2 * m, threads, thread_state) != 0)
{
return -2;
}
libsais16_gather_lms_suffixes_16u_omp(T, SA, n, threads, thread_state);
libsais16_reconstruct_lms_suffixes_omp(SA, n, m, threads);
}
libsais16_place_lms_suffixes_interval_16u(SA, n, m, buckets);
}
else
{
/* No LMS suffixes (monotone input): induction starts from a clean SA. */
memset(SA, 0, (size_t)n * sizeof(sa_sint_t));
}
return libsais16_induce_final_order_16u_omp(T, SA, n, bwt, r, I, buckets, threads, thread_state);
}
/* Allocates per-call working storage (bucket array and, when threads > 1,
   per-thread state), runs libsais16_main_16u, then releases the storage.
   Returns the construction result, or -2 on allocation failure. */
static sa_sint_t libsais16_main(const uint16_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs, sa_sint_t * freq, sa_sint_t threads)
{
    sa_sint_t result = -2;
    LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais16_alloc_thread_state(threads) : NULL;
    sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais16_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096);
    if (buckets != NULL && (threads == 1 || thread_state != NULL))
    {
        result = libsais16_main_16u(T, SA, n, buckets, bwt, r, I, fs, freq, threads, thread_state);
    }
    libsais16_free_aligned(buckets);
    libsais16_free_thread_state(thread_state);
    return result;
}
/* Runs the 16-bit construction using storage owned by a previously
   created context. Returns -2 when the context is missing or unusable. */
static sa_sint_t libsais16_main_ctx(const LIBSAIS_CONTEXT * ctx, const uint16_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs, sa_sint_t * freq)
{
    if (ctx == NULL) { return -2; }
    if (ctx->buckets == NULL) { return -2; }
    if (ctx->thread_state == NULL && ctx->threads != 1) { return -2; }
    return libsais16_main_16u(T, SA, n, ctx->buckets, bwt, r, I, fs, freq, (sa_sint_t)ctx->threads, ctx->thread_state);
}
/* Narrowing copy: writes the low 16 bits of each of the n entries of A
   into U. The main loop is unrolled by eight with software prefetching;
   a scalar tail handles the remaining (n mod 8) entries. */
static void libsais16_bwt_copy_16u(uint16_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n)
{
    const fast_sint_t prefetch_distance = 32;
    fast_sint_t pos, bound;
    for (pos = 0, bound = (fast_sint_t)n - 7; pos < bound; pos += 8)
    {
        libsais16_prefetch(&A[pos + prefetch_distance]);
        U[pos + 0] = (uint16_t)A[pos + 0];
        U[pos + 1] = (uint16_t)A[pos + 1];
        U[pos + 2] = (uint16_t)A[pos + 2];
        U[pos + 3] = (uint16_t)A[pos + 3];
        U[pos + 4] = (uint16_t)A[pos + 4];
        U[pos + 5] = (uint16_t)A[pos + 5];
        U[pos + 6] = (uint16_t)A[pos + 6];
        U[pos + 7] = (uint16_t)A[pos + 7];
    }
    for (bound += 7; pos < bound; pos += 1)
    {
        U[pos] = (uint16_t)A[pos];
    }
}
#if defined(_OPENMP)
/* Parallel variant of libsais16_bwt_copy_16u: splits the n entries into
   per-thread chunks (stride rounded down to a multiple of 16, last thread
   takes the remainder) and copies each chunk independently. Runs as a
   single sequential copy when threads <= 1 or n < 65536. The inner
   _OPENMP guard mirrors the template shared with the non-OpenMP build. */
static void libsais16_bwt_copy_16u_omp(uint16_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
fast_sint_t omp_block_stride = ((fast_sint_t)n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
/* Last thread absorbs the remainder beyond the aligned stride. */
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : (fast_sint_t)n - omp_block_start;
#else
UNUSED(threads);
fast_sint_t omp_block_start = 0;
fast_sint_t omp_block_size = (fast_sint_t)n;
#endif
libsais16_bwt_copy_16u(U + omp_block_start, A + omp_block_start, (sa_sint_t)omp_block_size);
}
}
#endif
void * libsais16_create_ctx(void)
{
return (void *)libsais16_create_ctx_main(1);
}
void libsais16_free_ctx(void * ctx)
{
libsais16_free_ctx_main((LIBSAIS_CONTEXT *)ctx);
}
/* Computes the suffix array SA of the 16-bit string T of length n.
   fs is extra free space past SA[n]; freq (optional) receives the symbol
   histogram. Returns 0 on success, -1 on invalid arguments, -2 on
   allocation failure. */
int32_t libsais16(const uint16_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq)
{
    if (T == NULL || SA == NULL) { return -1; }
    if (n < 0 || fs < 0) { return -1; }
    if (n < 2)
    {
        /* Trivial inputs: empty string or a single symbol. */
        if (freq != NULL) { memset(freq, 0, ALPHABET_SIZE * sizeof(int32_t)); }
        if (n == 1)
        {
            SA[0] = 0;
            if (freq != NULL) { freq[T[0]]++; }
        }
        return 0;
    }
    return libsais16_main(T, SA, n, 0, 0, NULL, fs, freq, 1);
}
/* Context-based variant of libsais16: same contract, but working storage
   comes from ctx instead of per-call allocation. */
int32_t libsais16_ctx(const void * ctx, const uint16_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq)
{
    if (ctx == NULL || T == NULL || SA == NULL) { return -1; }
    if (n < 0 || fs < 0) { return -1; }
    if (n < 2)
    {
        /* Trivial inputs: empty string or a single symbol. */
        if (freq != NULL) { memset(freq, 0, ALPHABET_SIZE * sizeof(int32_t)); }
        if (n == 1)
        {
            SA[0] = 0;
            if (freq != NULL) { freq[T[0]]++; }
        }
        return 0;
    }
    return libsais16_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, SA, n, 0, 0, NULL, fs, freq);
}
/* Computes the BWT of T into U; A is scratch of at least n + fs entries.
   Returns the primary index + 1 on success (n for trivial inputs),
   -1 on invalid arguments, -2 on failure. */
int32_t libsais16_bwt(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq)
{
    if (T == NULL || U == NULL || A == NULL) { return -1; }
    if (n < 0 || fs < 0) { return -1; }
    if (n <= 1)
    {
        /* The BWT of a 0/1-symbol string is the string itself. */
        if (freq != NULL) { memset(freq, 0, ALPHABET_SIZE * sizeof(int32_t)); }
        if (n == 1)
        {
            U[0] = T[0];
            if (freq != NULL) { freq[T[0]]++; }
        }
        return n;
    }
    sa_sint_t index = libsais16_main(T, A, n, 1, 0, NULL, fs, freq, 1);
    if (index < 0) { return index; }
    index++;
    /* Emit the last symbol first, then copy around the primary index. */
    U[0] = T[n - 1];
    libsais16_bwt_copy_16u(U + 1, A, index - 1);
    libsais16_bwt_copy_16u(U + index, A + index, n - index);
    return index;
}
/* Computes the BWT of T into U and records auxiliary indexes in I at
   sampling rate r (r must be a power of two >= 2). Returns 0 on success,
   -1 on invalid arguments, -2 on failure. */
int32_t libsais16_bwt_aux(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I)
{
    int32_t r_is_pow2 = (r >= 2) && ((r & (r - 1)) == 0);
    if (T == NULL || U == NULL || A == NULL || I == NULL) { return -1; }
    if (n < 0 || fs < 0 || !r_is_pow2) { return -1; }
    if (n <= 1)
    {
        /* Trivial input: emit the string itself and the sentinel index. */
        if (freq != NULL) { memset(freq, 0, ALPHABET_SIZE * sizeof(int32_t)); }
        if (n == 1)
        {
            U[0] = T[0];
            if (freq != NULL) { freq[T[0]]++; }
        }
        I[0] = n;
        return 0;
    }
    if (libsais16_main(T, A, n, 1, r, I, fs, freq, 1) != 0) { return -2; }
    /* Emit the last symbol first, then copy around the primary index I[0]. */
    U[0] = T[n - 1];
    libsais16_bwt_copy_16u(U + 1, A, I[0] - 1);
    libsais16_bwt_copy_16u(U + I[0], A + I[0], n - I[0]);
    return 0;
}
/* Context-based BWT: like libsais16_bwt but working storage (and the
   thread count used for the output copy) comes from ctx. Returns the
   primary index + 1 on success, n for trivial inputs, -1 on invalid
   arguments, -2 on failure. */
int32_t libsais16_bwt_ctx(const void * ctx, const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq)
{
if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0))
{
return -1;
}
else if (n <= 1)
{
/* The BWT of a 0/1-symbol string is the string itself. */
if (freq != NULL) { memset(freq, 0, ALPHABET_SIZE * sizeof(int32_t)); }
if (n == 1) { U[0] = T[0]; if (freq != NULL) { freq[T[0]]++; } }
return n;
}
sa_sint_t index = libsais16_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, 0, NULL, fs, freq);
if (index >= 0)
{
index++;
/* Emit the last symbol first; the two copies skip A[index - 1],
   the primary index position. */
U[0] = T[n - 1];
#if defined(_OPENMP)
libsais16_bwt_copy_16u_omp(U + 1, A, index - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads);
libsais16_bwt_copy_16u_omp(U + index, A + index, n - index, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads);
#else
libsais16_bwt_copy_16u(U + 1, A, index - 1);
libsais16_bwt_copy_16u(U + index, A + index, n - index);
#endif
}
return index;
}
/* Context-based BWT with auxiliary indexes: like libsais16_bwt_aux but
   working storage comes from ctx. r must be a power of two >= 2.
   Returns 0 on success, -1 on invalid arguments, -2 on failure. */
int32_t libsais16_bwt_aux_ctx(const void * ctx, const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I)
{
if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL))
{
return -1;
}
else if (n <= 1)
{
/* Trivial input: emit the string itself and the sentinel index. */
if (freq != NULL) { memset(freq, 0, ALPHABET_SIZE * sizeof(int32_t)); }
if (n == 1) { U[0] = T[0]; if (freq != NULL) { freq[T[0]]++; } }
I[0] = n;
return 0;
}
if (libsais16_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, r, I, fs, freq) != 0)
{
return -2;
}
/* Emit the last symbol first; the two copies skip A[I[0] - 1],
   the primary index position. */
U[0] = T[n - 1];
#if defined(_OPENMP)
libsais16_bwt_copy_16u_omp(U + 1, A, I[0] - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads);
libsais16_bwt_copy_16u_omp(U + I[0], A + I[0], n - I[0], (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads);
#else
libsais16_bwt_copy_16u(U + 1, A, I[0] - 1);
libsais16_bwt_copy_16u(U + I[0], A + I[0], n - I[0]);
#endif
return 0;
}
#if defined(_OPENMP)
/* Creates a context sized for the requested thread count; 0 means use
   omp_get_max_threads(). Returns NULL on bad arguments or failure. */
void * libsais16_create_ctx_omp(int32_t threads)
{
    if (threads < 0) { return NULL; }
    if (threads == 0) { threads = omp_get_max_threads(); }
    return (void *)libsais16_create_ctx_main(threads);
}
/* Multi-threaded variant of libsais16; threads == 0 selects
   omp_get_max_threads(). Returns 0 on success, -1 on invalid arguments,
   -2 on allocation failure. */
int32_t libsais16_omp(const uint16_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq, int32_t threads)
{
    if (T == NULL || SA == NULL) { return -1; }
    if (n < 0 || fs < 0 || threads < 0) { return -1; }
    if (n < 2)
    {
        /* Trivial inputs: empty string or a single symbol. */
        if (freq != NULL) { memset(freq, 0, ALPHABET_SIZE * sizeof(int32_t)); }
        if (n == 1)
        {
            SA[0] = 0;
            if (freq != NULL) { freq[T[0]]++; }
        }
        return 0;
    }
    if (threads == 0) { threads = omp_get_max_threads(); }
    return libsais16_main(T, SA, n, 0, 0, NULL, fs, freq, threads);
}
/* Multi-threaded variant of libsais16_bwt; threads == 0 selects
   omp_get_max_threads(). Returns the primary index + 1 on success
   (n for trivial inputs), -1 on invalid arguments, -2 on failure. */
int32_t libsais16_bwt_omp(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t threads)
{
    if (T == NULL || U == NULL || A == NULL) { return -1; }
    if (n < 0 || fs < 0 || threads < 0) { return -1; }
    if (n <= 1)
    {
        /* The BWT of a 0/1-symbol string is the string itself. */
        if (freq != NULL) { memset(freq, 0, ALPHABET_SIZE * sizeof(int32_t)); }
        if (n == 1)
        {
            U[0] = T[0];
            if (freq != NULL) { freq[T[0]]++; }
        }
        return n;
    }
    if (threads == 0) { threads = omp_get_max_threads(); }
    sa_sint_t index = libsais16_main(T, A, n, 1, 0, NULL, fs, freq, threads);
    if (index < 0) { return index; }
    index++;
    /* Emit the last symbol first, then copy around the primary index. */
    U[0] = T[n - 1];
    libsais16_bwt_copy_16u_omp(U + 1, A, index - 1, threads);
    libsais16_bwt_copy_16u_omp(U + index, A + index, n - index, threads);
    return index;
}
/* Multi-threaded variant of libsais16_bwt_aux; threads == 0 selects
   omp_get_max_threads(). r must be a power of two >= 2. Returns 0 on
   success, -1 on invalid arguments, -2 on failure. */
int32_t libsais16_bwt_aux_omp(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I, int32_t threads)
{
    int32_t r_is_pow2 = (r >= 2) && ((r & (r - 1)) == 0);
    if (T == NULL || U == NULL || A == NULL || I == NULL) { return -1; }
    if (n < 0 || fs < 0 || threads < 0 || !r_is_pow2) { return -1; }
    if (n <= 1)
    {
        /* Trivial input: emit the string itself and the sentinel index. */
        if (freq != NULL) { memset(freq, 0, ALPHABET_SIZE * sizeof(int32_t)); }
        if (n == 1)
        {
            U[0] = T[0];
            if (freq != NULL) { freq[T[0]]++; }
        }
        I[0] = n;
        return 0;
    }
    if (threads == 0) { threads = omp_get_max_threads(); }
    if (libsais16_main(T, A, n, 1, r, I, fs, freq, threads) != 0) { return -2; }
    /* Emit the last symbol first, then copy around the primary index I[0]. */
    U[0] = T[n - 1];
    libsais16_bwt_copy_16u_omp(U + 1, A, I[0] - 1, threads);
    libsais16_bwt_copy_16u_omp(U + I[0], A + I[0], n - I[0], threads);
    return 0;
}
#endif
/* Allocates an inverse-BWT context: the per-symbol bucket array, the
   fastbits lookup table, and (when threads > 1) per-thread bucket
   storage. Returns NULL if any allocation fails. */
static LIBSAIS_UNBWT_CONTEXT * libsais16_unbwt_create_ctx_main(sa_sint_t threads)
{
    LIBSAIS_UNBWT_CONTEXT * RESTRICT ctx = (LIBSAIS_UNBWT_CONTEXT *)libsais16_alloc_aligned(sizeof(LIBSAIS_UNBWT_CONTEXT), 64);
    sa_uint_t * RESTRICT bucket2 = (sa_uint_t *)libsais16_alloc_aligned(ALPHABET_SIZE * sizeof(sa_uint_t), 4096);
    uint16_t * RESTRICT fastbits = (uint16_t *)libsais16_alloc_aligned((1 + (1 << UNBWT_FASTBITS)) * sizeof(uint16_t), 4096);
    sa_uint_t * RESTRICT buckets = threads > 1 ? (sa_uint_t *)libsais16_alloc_aligned((size_t)threads * ALPHABET_SIZE * sizeof(sa_uint_t), 4096) : NULL;
    int usable = (ctx != NULL) && (bucket2 != NULL) && (fastbits != NULL) && (buckets != NULL || threads == 1);
    if (!usable)
    {
        /* Roll back every allocation; freeing NULL is a no-op. */
        libsais16_free_aligned(buckets);
        libsais16_free_aligned(fastbits);
        libsais16_free_aligned(bucket2);
        libsais16_free_aligned(ctx);
        return NULL;
    }
    ctx->bucket2 = bucket2;
    ctx->fastbits = fastbits;
    ctx->buckets = buckets;
    ctx->threads = threads;
    return ctx;
}
/* Releases an inverse-BWT context and all storage it owns (NULL allowed). */
static void libsais16_unbwt_free_ctx_main(LIBSAIS_UNBWT_CONTEXT * ctx)
{
    if (ctx == NULL) { return; }
    libsais16_free_aligned(ctx->buckets);
    libsais16_free_aligned(ctx->fastbits);
    libsais16_free_aligned(ctx->bucket2);
    libsais16_free_aligned(ctx);
}
/* Accumulates the symbol histogram of T[0..n-1] into count (not zeroed
   here; callers clear or merge as needed). */
static void libsais16_unbwt_compute_histogram(const uint16_t * RESTRICT T, fast_sint_t n, sa_uint_t * RESTRICT count)
{
    fast_sint_t pos;
    for (pos = 0; pos < n; pos += 1)
    {
        count[T[pos]]++;
    }
}
/* Converts the symbol histogram in bucket2 into exclusive prefix sums
   (starting at 1) and fills fastbits so that fastbits[p >> shift] is the
   smallest symbol whose bucket range may contain position p; the decode
   loops then only need a short linear scan from that symbol. */
static void libsais16_unbwt_calculate_fastbits(sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift)
{
fast_uint_t v, w, sum;
for (v = 0, sum = 1, w = 0; w < ALPHABET_SIZE; ++w)
{
/* Replace each count with the running start of its bucket. */
fast_uint_t prev = sum; sum += bucket2[w]; bucket2[w] = (sa_uint_t)prev;
if (prev != sum)
{
/* Cover every fastbits slot that overlaps this non-empty bucket. */
for (; v <= ((sum - 1) >> shift); ++v) { fastbits[v] = (uint16_t)w; }
}
}
}
/* Builds (a slice of) the inverse-BWT permutation P by bucket-sorting the
   positions of T. bucket2 holds running bucket cursors and is advanced in
   place. The range [omp_block_start, omp_block_end) is processed in two
   phases split at `index` (the primary index). */
static void libsais16_unbwt_calculate_P(const uint16_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, fast_uint_t index, fast_sint_t omp_block_start, fast_sint_t omp_block_end)
{
{
/* Positions before the primary index map directly: P[bucket2[T[i]]++] = i. */
fast_sint_t i = omp_block_start, j = (fast_sint_t)index; if (omp_block_end < j) { j = omp_block_end; }
for (; i < j; ++i) { fast_uint_t c = T[i]; P[bucket2[c]++] = (sa_uint_t)i; }
}
{
/* Positions at/after the primary index are shifted by one: T is rebased
   by -1 and i starts one later, so the symbol read is the original T[i]
   while the stored position is i + 1. */
fast_sint_t i = (fast_sint_t)index, j = omp_block_end; if (omp_block_start > i) { i = omp_block_start; }
for (T -= 1, i += 1; i <= j; ++i) { fast_uint_t c = T[i]; P[bucket2[c]++] = (sa_uint_t)i; }
}
}
/* Sequential inverse-BWT setup: obtains the symbol histogram (from freq
   when provided, otherwise by counting T), builds the bucket prefix sums
   and fastbits table, and fills the full permutation P. */
static void libsais16_unbwt_init_single(const uint16_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits)
{
fast_uint_t index = I[0];
/* Choose shift so (n >> shift) fits the fastbits table size. */
fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; }
if (freq != NULL)
{
/* Caller-supplied histogram; assumes sa_uint_t and sa_sint_t have the
   same size for this raw copy. */
memcpy(bucket2, freq, ALPHABET_SIZE * sizeof(sa_uint_t));
}
else
{
memset(bucket2, 0, ALPHABET_SIZE * sizeof(sa_uint_t));
libsais16_unbwt_compute_histogram(T, n, bucket2);
}
libsais16_unbwt_calculate_fastbits(bucket2, fastbits, shift);
libsais16_unbwt_calculate_P(T, P, bucket2, index, 0, n);
}
#if defined(_OPENMP)
/* Parallel inverse-BWT setup: per-thread histograms are merged into
   global prefix sums, then each thread fills its slice of P using a
   private copy of the bucket cursors offset by the preceding threads'
   counts. Barriers separate the histogram, merge, fastbits, and fill
   phases. Falls back to the sequential path when only one thread runs. */
static void libsais16_unbwt_init_parallel(const uint16_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads)
{
fast_uint_t index = I[0];
fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; }
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
{
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
if (omp_num_threads == 1)
{
libsais16_unbwt_init_single(T, P, n, freq, I, bucket2, fastbits);
}
else
{
{
/* Phase 1: each thread counts symbols in its own chunk of T. */
sa_uint_t * RESTRICT bucket2_local = buckets + omp_thread_num * ALPHABET_SIZE;
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
memset(bucket2_local, 0, ALPHABET_SIZE * sizeof(sa_uint_t));
libsais16_unbwt_compute_histogram(T + omp_block_start, omp_block_size, bucket2_local);
}
#pragma omp barrier
{
/* Phase 2: merge per-thread histograms into bucket2, leaving each
   thread's slot holding the count of all earlier threads (its
   starting offset within the merged bucket). */
sa_uint_t * RESTRICT bucket2_temp = buckets;
fast_sint_t omp_block_stride = (ALPHABET_SIZE / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ALPHABET_SIZE - omp_block_start;
memset(bucket2 + omp_block_start, 0, omp_block_size * sizeof(sa_uint_t));
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t, bucket2_temp += ALPHABET_SIZE)
{
fast_sint_t c; for (c = omp_block_start; c < omp_block_start + omp_block_size; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_temp[c]; bucket2[c] = A + B; bucket2_temp[c] = A; }
}
}
#pragma omp barrier
#pragma omp master
{
/* Phase 3: global prefix sums and fastbits (single thread). */
libsais16_unbwt_calculate_fastbits(bucket2, fastbits, shift);
}
#pragma omp barrier
{
/* Phase 4: each thread fills its slice of P using private cursors
   (global bucket start + this thread's offset). */
sa_uint_t * RESTRICT bucket2_local = buckets + omp_thread_num * ALPHABET_SIZE;
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_local[c]; bucket2_local[c] = A + B; }
libsais16_unbwt_calculate_P(T, P, bucket2_local, index, omp_block_start, omp_block_start + omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
/* Publish the last thread's final cursors as the global state. */
memcpy(bucket2, buckets + (omp_num_threads - 1) * ALPHABET_SIZE, ALPHABET_SIZE * sizeof(sa_uint_t));
}
}
}
}
#endif
/* Decodes k symbols of one output block by following the permutation P
   from position *i0. fastbits gives a lower bound for the symbol owning
   a position; the inner loop scans forward to the exact bucket. The
   final position is stored back through i0 so decoding can resume. */
static void libsais16_unbwt_decode_1(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t * i0, fast_uint_t k)
{
uint16_t * RESTRICT U0 = U;
fast_uint_t i, p0 = *i0;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0;
}
*i0 = p0;
}
/* Like libsais16_unbwt_decode_1, but interleaves two independent blocks
   spaced r apart so the two dependent P[] chains proceed in parallel. */
static void libsais16_unbwt_decode_2(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t k)
{
uint16_t * RESTRICT U0 = U;
uint16_t * RESTRICT U1 = U0 + r;
fast_uint_t i, p0 = *i0, p1 = *i1;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0;
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1;
}
*i0 = p0; *i1 = p1;
}
/* Like libsais16_unbwt_decode_1, but interleaves three independent
   blocks spaced r apart. */
static void libsais16_unbwt_decode_3(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t k)
{
uint16_t * RESTRICT U0 = U;
uint16_t * RESTRICT U1 = U0 + r;
uint16_t * RESTRICT U2 = U1 + r;
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0;
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1;
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2;
}
*i0 = p0; *i1 = p1; *i2 = p2;
}
/* Like libsais16_unbwt_decode_1, but interleaves four independent
   blocks spaced r apart. */
static void libsais16_unbwt_decode_4(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t k)
{
uint16_t * RESTRICT U0 = U;
uint16_t * RESTRICT U1 = U0 + r;
uint16_t * RESTRICT U2 = U1 + r;
uint16_t * RESTRICT U3 = U2 + r;
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0;
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1;
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2;
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3;
}
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3;
}
/* Like libsais16_unbwt_decode_1, but interleaves five independent
   blocks spaced r apart. */
static void libsais16_unbwt_decode_5(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t k)
{
uint16_t * RESTRICT U0 = U;
uint16_t * RESTRICT U1 = U0 + r;
uint16_t * RESTRICT U2 = U1 + r;
uint16_t * RESTRICT U3 = U2 + r;
uint16_t * RESTRICT U4 = U3 + r;
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0;
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1;
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2;
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3;
uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = c4;
}
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4;
}
/* Like libsais16_unbwt_decode_1, but interleaves six independent
   blocks spaced r apart. */
static void libsais16_unbwt_decode_6(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t k)
{
uint16_t * RESTRICT U0 = U;
uint16_t * RESTRICT U1 = U0 + r;
uint16_t * RESTRICT U2 = U1 + r;
uint16_t * RESTRICT U3 = U2 + r;
uint16_t * RESTRICT U4 = U3 + r;
uint16_t * RESTRICT U5 = U4 + r;
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0;
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1;
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2;
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3;
uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = c4;
uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = c5;
}
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5;
}
/* Like libsais16_unbwt_decode_1, but interleaves seven independent
   blocks spaced r apart. */
static void libsais16_unbwt_decode_7(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t k)
{
uint16_t * RESTRICT U0 = U;
uint16_t * RESTRICT U1 = U0 + r;
uint16_t * RESTRICT U2 = U1 + r;
uint16_t * RESTRICT U3 = U2 + r;
uint16_t * RESTRICT U4 = U3 + r;
uint16_t * RESTRICT U5 = U4 + r;
uint16_t * RESTRICT U6 = U5 + r;
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0;
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1;
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2;
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3;
uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = c4;
uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = c5;
uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; U6[i] = c6;
}
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; *i6 = p6;
}
/* Like libsais16_unbwt_decode_1, but interleaves eight independent
   blocks spaced r apart (the widest kernel used by the dispatcher). */
static void libsais16_unbwt_decode_8(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t * i7, fast_uint_t k)
{
uint16_t * RESTRICT U0 = U;
uint16_t * RESTRICT U1 = U0 + r;
uint16_t * RESTRICT U2 = U1 + r;
uint16_t * RESTRICT U3 = U2 + r;
uint16_t * RESTRICT U4 = U3 + r;
uint16_t * RESTRICT U5 = U4 + r;
uint16_t * RESTRICT U6 = U5 + r;
uint16_t * RESTRICT U7 = U6 + r;
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6, p7 = *i7;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0;
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1;
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2;
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3;
uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = c4;
uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = c5;
uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; U6[i] = c6;
uint16_t c7 = fastbits[p7 >> shift]; if (bucket2[c7] <= p7) { do { c7++; } while (bucket2[c7] <= p7); } p7 = P[p7]; U7[i] = c7;
}
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; *i6 = p6; *i7 = p7;
}
/* Decodes `blocks` output blocks (each r symbols, the last only
   `reminder` symbols). Full groups of eight are decoded with the 8-way
   kernel. The final group of 1..8 blocks is decoded in two phases:
   first all remaining chains advance by `reminder` symbols, then the
   last (shortest) chain is dropped and the rest advance by the
   remaining r - reminder symbols. */
static void libsais16_unbwt_decode(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_sint_t blocks, fast_uint_t reminder)
{
fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; }
fast_uint_t offset = 0;
/* Decode complete groups of eight full-length blocks. */
while (blocks > 8)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7];
libsais16_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, (fast_uint_t)r);
I += 8; blocks -= 8; offset += 8 * (fast_uint_t)r;
}
/* Final group: dispatch on the number of remaining blocks. */
if (blocks == 1)
{
fast_uint_t i0 = I[0];
libsais16_unbwt_decode_1(U + offset, P, bucket2, fastbits, shift, &i0, reminder);
}
else if (blocks == 2)
{
fast_uint_t i0 = I[0], i1 = I[1];
libsais16_unbwt_decode_2(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, reminder);
libsais16_unbwt_decode_1(U + offset + reminder, P, bucket2, fastbits, shift, &i0, ((fast_uint_t)r) - reminder);
}
else if (blocks == 3)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2];
libsais16_unbwt_decode_3(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, reminder);
libsais16_unbwt_decode_2(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, ((fast_uint_t)r) - reminder);
}
else if (blocks == 4)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3];
libsais16_unbwt_decode_4(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, reminder);
libsais16_unbwt_decode_3(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, ((fast_uint_t)r) - reminder);
}
else if (blocks == 5)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4];
libsais16_unbwt_decode_5(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, reminder);
libsais16_unbwt_decode_4(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, ((fast_uint_t)r) - reminder);
}
else if (blocks == 6)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5];
libsais16_unbwt_decode_6(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, reminder);
libsais16_unbwt_decode_5(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, ((fast_uint_t)r) - reminder);
}
else if (blocks == 7)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6];
libsais16_unbwt_decode_7(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, reminder);
libsais16_unbwt_decode_6(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, ((fast_uint_t)r) - reminder);
}
else
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7];
libsais16_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, reminder);
libsais16_unbwt_decode_7(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, ((fast_uint_t)r) - reminder);
}
}
/* Splits the output blocks across threads and runs the sequential decoder
   on each range; only the thread holding the final block passes the short
   `reminder` length, all others decode full blocks of r symbols. */
static void libsais16_unbwt_decode_omp(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_sint_t threads)
{
/* Number of r-sized blocks, and the length of the final partial block. */
fast_sint_t blocks = 1 + (((fast_sint_t)n - 1) / (fast_sint_t)r);
fast_uint_t reminder = (fast_uint_t)n - ((fast_uint_t)r * ((fast_uint_t)blocks - 1));
#if defined(_OPENMP)
fast_sint_t max_threads = blocks < threads ? blocks : threads;
#pragma omp parallel num_threads(max_threads) if(max_threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Distribute blocks as evenly as possible: the first (blocks mod
   threads) threads take one extra block. */
fast_sint_t omp_block_stride = blocks / omp_num_threads;
fast_sint_t omp_block_reminder = blocks % omp_num_threads;
fast_sint_t omp_block_size = omp_block_stride + (omp_thread_num < omp_block_reminder);
fast_sint_t omp_block_start = omp_block_stride * omp_thread_num + (omp_thread_num < omp_block_reminder ? omp_thread_num : omp_block_reminder);
libsais16_unbwt_decode(U + r * omp_block_start, P, n, r, I + omp_block_start, bucket2, fastbits, omp_block_size, omp_thread_num < omp_num_threads - 1 ? (fast_uint_t)r : reminder);
}
}
/* Inverse BWT driver: builds the permutation P and lookup tables
   (parallel setup only when OpenMP is available, threads > 1, and
   n >= 262144), then decodes the output. Always returns 0. */
static sa_sint_t libsais16_unbwt_core(const uint16_t * RESTRICT T, uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads)
{
#if defined(_OPENMP)
if (threads > 1 && n >= 262144)
{
libsais16_unbwt_init_parallel(T, P, n, freq, I, bucket2, fastbits, buckets, threads);
}
else
#else
UNUSED(buckets);
#endif
{
libsais16_unbwt_init_single(T, P, n, freq, I, bucket2, fastbits);
}
libsais16_unbwt_decode_omp(U, P, n, r, I, bucket2, fastbits, threads);
return 0;
}
/* Allocates per-call inverse-BWT storage (bucket array, fastbits sized to
   n >> shift, and per-thread buckets for large parallel runs), runs the
   core, and frees the storage. Returns 0 on success, -2 on allocation
   failure. */
static sa_sint_t libsais16_unbwt_main(const uint16_t * T, uint16_t * U, sa_uint_t * P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * I, sa_sint_t threads)
{
fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; }
sa_uint_t * RESTRICT bucket2 = (sa_uint_t *)libsais16_alloc_aligned(ALPHABET_SIZE * sizeof(sa_uint_t), 4096);
uint16_t * RESTRICT fastbits = (uint16_t *)libsais16_alloc_aligned(((size_t)1 + (size_t)(n >> shift)) * sizeof(uint16_t), 4096);
sa_uint_t * RESTRICT buckets = threads > 1 && n >= 262144 ? (sa_uint_t *)libsais16_alloc_aligned((size_t)threads * ALPHABET_SIZE * sizeof(sa_uint_t), 4096) : NULL;
/* buckets may legitimately be NULL when the parallel setup won't run. */
sa_sint_t index = bucket2 != NULL && fastbits != NULL && (buckets != NULL || threads == 1 || n < 262144)
? libsais16_unbwt_core(T, U, P, n, freq, r, I, bucket2, fastbits, buckets, threads)
: -2;
libsais16_free_aligned(buckets);
libsais16_free_aligned(fastbits);
libsais16_free_aligned(bucket2);
return index;
}
/* Context-based inverse BWT: runs the core using storage owned by ctx.
   Returns -2 when the context is missing or unusable. */
static sa_sint_t libsais16_unbwt_main_ctx(const LIBSAIS_UNBWT_CONTEXT * ctx, const uint16_t * T, uint16_t * U, sa_uint_t * P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * I)
{
    if (ctx == NULL) { return -2; }
    if (ctx->bucket2 == NULL || ctx->fastbits == NULL) { return -2; }
    if (ctx->buckets == NULL && ctx->threads != 1) { return -2; }
    return libsais16_unbwt_core(T, U, P, n, freq, r, I, ctx->bucket2, ctx->fastbits, ctx->buckets, (sa_sint_t)ctx->threads);
}
void * libsais16_unbwt_create_ctx(void)
{
return (void *)libsais16_unbwt_create_ctx_main(1);
}
/* Releases a context created by libsais16_unbwt_create_ctx(_omp).
NOTE(review): presumably a no-op for ctx == NULL — confirm in
libsais16_unbwt_free_ctx_main before relying on it. */
void libsais16_unbwt_free_ctx(void * ctx)
{
libsais16_unbwt_free_ctx_main((LIBSAIS_UNBWT_CONTEXT *)ctx);
}
/* Inverse BWT with a single primary index 'i': forwards to the auxiliary-index
variant with sampling rate r == n, so exactly one index entry is consulted. */
int32_t libsais16_unbwt(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i)
{
return libsais16_unbwt_aux(T, U, A, n, freq, n, &i);
}
/* Context-based inverse BWT with a single primary index 'i': forwards to the
auxiliary-index variant with sampling rate r == n (one index entry). */
int32_t libsais16_unbwt_ctx(const void * ctx, const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i)
{
return libsais16_unbwt_aux_ctx(ctx, T, U, A, n, freq, n, &i);
}
// Inverse BWT with auxiliary indexes I sampled every r symbols.
// Returns 0 on success, -1 on invalid arguments, -2 on allocation failure
// (propagated from libsais16_unbwt_main).
int32_t libsais16_unbwt_aux(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I)
{
    /* r must equal n, or be a power of two not smaller than 2 */
    int r_is_valid = (r == n) || (r >= 2 && (r & (r - 1)) == 0);
    if (T == NULL || U == NULL || A == NULL || n < 0 || !r_is_valid || I == NULL)
    {
        return -1;
    }

    if (n <= 1)
    {
        /* trivial strings: the single index entry must equal n */
        if (I[0] != n) { return -1; }
        if (n == 1) { U[0] = T[0]; }
        return 0;
    }

    /* every sampled index must lie in (0, n] */
    fast_sint_t t;
    for (t = 0; t <= (n - 1) / r; ++t)
    {
        if (I[t] <= 0 || I[t] > n) { return -1; }
    }

    return libsais16_unbwt_main(T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I, 1);
}
// Context-based inverse BWT with auxiliary indexes I sampled every r symbols.
// Same validation as libsais16_unbwt_aux; returns 0 on success, -1 on invalid
// arguments, -2 on an invalid/incomplete context.
int32_t libsais16_unbwt_aux_ctx(const void * ctx, const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I)
{
    /* r must equal n, or be a power of two not smaller than 2 */
    int r_is_valid = (r == n) || (r >= 2 && (r & (r - 1)) == 0);
    if (T == NULL || U == NULL || A == NULL || n < 0 || !r_is_valid || I == NULL)
    {
        return -1;
    }

    if (n <= 1)
    {
        /* trivial strings: the single index entry must equal n */
        if (I[0] != n) { return -1; }
        if (n == 1) { U[0] = T[0]; }
        return 0;
    }

    /* every sampled index must lie in (0, n] */
    fast_sint_t t;
    for (t = 0; t <= (n - 1) / r; ++t)
    {
        if (I[t] <= 0 || I[t] > n) { return -1; }
    }

    return libsais16_unbwt_main_ctx((const LIBSAIS_UNBWT_CONTEXT *)ctx, T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I);
}
#if defined(_OPENMP)
/* Allocates an inverse-BWT context sized for 'threads' OpenMP threads.
threads == 0 means "use omp_get_max_threads()"; negative thread counts are
rejected with NULL. */
void * libsais16_unbwt_create_ctx_omp(int32_t threads)
{
if (threads < 0) { return NULL; }
threads = threads > 0 ? threads : omp_get_max_threads();
return (void *)libsais16_unbwt_create_ctx_main(threads);
}
/* OpenMP inverse BWT with a single primary index 'i': forwards to the
auxiliary-index variant with sampling rate r == n (one index entry). */
int32_t libsais16_unbwt_omp(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i, int32_t threads)
{
return libsais16_unbwt_aux_omp(T, U, A, n, freq, n, &i, threads);
}
// OpenMP inverse BWT with auxiliary indexes I sampled every r symbols.
// threads == 0 means "use omp_get_max_threads()".  Returns 0 on success,
// -1 on invalid arguments, -2 on allocation failure.
int32_t libsais16_unbwt_aux_omp(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I, int32_t threads)
{
    /* r must equal n, or be a power of two not smaller than 2 */
    int r_is_valid = (r == n) || (r >= 2 && (r & (r - 1)) == 0);
    if (T == NULL || U == NULL || A == NULL || n < 0 || !r_is_valid || I == NULL || threads < 0)
    {
        return -1;
    }

    if (n <= 1)
    {
        /* trivial strings: the single index entry must equal n */
        if (I[0] != n) { return -1; }
        if (n == 1) { U[0] = T[0]; }
        return 0;
    }

    /* every sampled index must lie in (0, n] */
    fast_sint_t t;
    for (t = 0; t <= (n - 1) / r; ++t)
    {
        if (I[t] <= 0 || I[t] > n) { return -1; }
    }

    if (threads == 0) { threads = omp_get_max_threads(); }
    return libsais16_unbwt_main(T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I, threads);
}
#endif
|
GB_unop__identity_fp32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_fc32)
// op(A') function: GB (_unop_tran__identity_fp32_fc32)
// C type: float
// A type: GxB_FC32_t
// cast: float cij = (float) crealf (aij)
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) crealf (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) crealf (aij) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise identity apply with typecast: Cx [p] = (float) crealf (Ax [p]),
// i.e. FC32 -> FP32 by taking the real part.  When A is bitmap (Ab != NULL)
// only present entries are written.
GrB_Info GB (_unop_apply__identity_fp32_fc32)
(
float *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
float z = (float) crealf (aij) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
float z = (float) crealf (aij) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// shared transpose template; uses the GB_GETA/GB_CAST_OP macros defined above
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
CALPHADFreeEnergyFunctionsTernary.h | #ifndef included_CALPHADFreeEnergyFunctionsTernary
#define included_CALPHADFreeEnergyFunctionsTernary
#include "CALPHADSpeciesPhaseGibbsEnergy.h"
#include "InterpolationType.h"
#include "Phases.h"
#include "datatypes.h"
#include "functions.h"
#include <boost/property_tree/ptree.hpp>
#include <fstream>
#include <iostream>
#include <math.h>
namespace Thermo4PFM
{
// CALPHAD free-energy model for a ternary (A,B,C) alloy with two phases
// (liquid 'L' and solid 'A').  Mixing energies use Redlich-Kister expansions
// whose coefficients are linear in temperature (a + b*T), read from a
// property-tree database.
class CALPHADFreeEnergyFunctionsTernary
{
public:
CALPHADFreeEnergyFunctionsTernary(boost::property_tree::ptree& input_db,
boost::optional<boost::property_tree::ptree&> newton_db,
const EnergyInterpolationType energy_interp_func_type,
const ConcInterpolationType conc_interp_func_type);
~CALPHADFreeEnergyFunctionsTernary(){};
double computeFreeEnergy(const double temperature, const double* const conc,
const PhaseIndex pi, const bool gp = false);
void computeDerivFreeEnergy(const double temperature,
const double* const conc, const PhaseIndex pi, double* deriv);
void computeSecondDerivativeFreeEnergy(const double temp,
const double* const conc, const PhaseIndex pi, double* d2fdc2);
// Solve for equilibrium compositions at 'temperature'; returns true on
// convergence within 'maxits' iterations.
bool computeCeqT(const double temperature, double* ceq,
const int maxits = 20, const bool verbose = false);
/// Compute compositions and phase fractions at the ends of the tie line
/// passing through nominal composition (c0,c1)
bool computeTieLine(const double temperature, const double c0,
const double c1, double* ceq, const int maxits = 20,
const bool verbose = false);
void preRunDiagnostics(const double T0 = 300., const double T1 = 3000.);
int computePhaseConcentrations(const double temperature,
const double* const conc, const double* const phi, double* x);
void energyVsPhiAndC(const double temperature, const double* const ceq,
const bool found_ceq, const double phi_well_scale,
const int npts_phi = 51,
const int npts_c = 50); // number of compositions to use (>1)
void printEnergyVsComposition(
const double temperature, std::ostream& os, const int npts = 100);
double fchem(const double* const phi, const double* const conc,
const double temperature);
void printEnergyVsPhiHeader(const double temperature, const int nphi,
const int nc0, const int nc1, const double c0min, const double c0max,
const double c1min, const double c1max, std::ostream& os) const;
void printEnergyVsPhi(const double* const conc, const double temperature,
const double phi_well_scale, const int npts, std::ostream& os);
private:
EnergyInterpolationType energy_interp_func_type_;
ConcInterpolationType conc_interp_func_type_;
void readNewtonparameters(boost::property_tree::ptree& newton_db);
// Evaluate all T-dependent mixing coefficients (L parameters) and the
// single-species energies fA, fB, fC for both phases at 'temperature'.
void computeTdependentParameters(const double temperature,
CalphadDataType* L_AB_L, CalphadDataType* L_AC_L,
CalphadDataType* L_BC_L, CalphadDataType* L_ABC_L,
CalphadDataType* L_AB_S, CalphadDataType* L_AC_S,
CalphadDataType* L_BC_S, CalphadDataType* L_ABC_S, CalphadDataType* fA,
CalphadDataType* fB, CalphadDataType* fC);
char* fenergy_diag_filename_;
// Newton solver controls (read from the optional newton_db)
double newton_tol_;
double newton_alpha_;
int newton_maxits_;
bool newton_verbose_;
// Single species energies in each phase
// size 3 for species A, B, C
CALPHADSpeciesPhaseGibbsEnergy g_species_phaseL_[3];
CALPHADSpeciesPhaseGibbsEnergy g_species_phaseA_[3];
// size 4 for L0, L1, L2, L3, with 2 coefficient for linear expansion in T
// a+b*T
CalphadDataType LmixABPhaseL_[4][2];
CalphadDataType LmixABPhaseA_[4][2];
CalphadDataType LmixACPhaseL_[4][2];
CalphadDataType LmixACPhaseA_[4][2];
CalphadDataType LmixBCPhaseL_[4][2];
CalphadDataType LmixBCPhaseA_[4][2];
CalphadDataType LmixABCPhaseL_[3][2];
CalphadDataType LmixABCPhaseA_[3][2];
// interpolation polynomials, indexed by the *_interp_func_type_ members
double (*fun_ptr_arr_[3])(const double){ linear_interp_func,
pbg_interp_func, harmonic_interp_func };
void readParameters(boost::property_tree::ptree& calphad_db);
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
// energy of species "is" in phase L,A,B
double getFenergyPhaseL(const short is, const double temperature)
{
return g_species_phaseL_[is].fenergy(temperature);
}
double getFenergyPhaseA(const short is, const double temperature)
{
return g_species_phaseA_[is].fenergy(temperature);
}
// lmix<k><XY>Phase(pi, T): k-th Redlich-Kister coefficient of the X-Y
// binary in phase 'pi' (NAN for an unknown phase).  The *PhaseL/*PhaseA
// helpers evaluate the linear-in-T expansion a + b*T directly.
CalphadDataType lmix0ABPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix0ABPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix0ABPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix1ABPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix1ABPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix1ABPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix2ABPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix2ABPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix2ABPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix3ABPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix3ABPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix3ABPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix0ABPhaseL(const double temperature)
{
return LmixABPhaseL_[0][0] + LmixABPhaseL_[0][1] * temperature;
}
CalphadDataType lmix1ABPhaseL(const double temperature)
{
return LmixABPhaseL_[1][0] + LmixABPhaseL_[1][1] * temperature;
}
CalphadDataType lmix2ABPhaseL(const double temperature)
{
return LmixABPhaseL_[2][0] + LmixABPhaseL_[2][1] * temperature;
}
CalphadDataType lmix3ABPhaseL(const double temperature)
{
return LmixABPhaseL_[3][0] + LmixABPhaseL_[3][1] * temperature;
}
CalphadDataType lmix0ABPhaseA(const double temperature)
{
return LmixABPhaseA_[0][0] + LmixABPhaseA_[0][1] * temperature;
}
CalphadDataType lmix1ABPhaseA(const double temperature)
{
return LmixABPhaseA_[1][0] + LmixABPhaseA_[1][1] * temperature;
}
CalphadDataType lmix2ABPhaseA(const double temperature)
{
return LmixABPhaseA_[2][0] + LmixABPhaseA_[2][1] * temperature;
}
CalphadDataType lmix3ABPhaseA(const double temperature)
{
return LmixABPhaseA_[3][0] + LmixABPhaseA_[3][1] * temperature;
}
CalphadDataType lmix0ACPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix0ACPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix0ACPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix1ACPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix1ACPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix1ACPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix2ACPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix2ACPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix2ACPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix3ACPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix3ACPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix3ACPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix0ACPhaseL(const double temperature)
{
return LmixACPhaseL_[0][0] + LmixACPhaseL_[0][1] * temperature;
}
CalphadDataType lmix1ACPhaseL(const double temperature)
{
return LmixACPhaseL_[1][0] + LmixACPhaseL_[1][1] * temperature;
}
CalphadDataType lmix2ACPhaseL(const double temperature)
{
return LmixACPhaseL_[2][0] + LmixACPhaseL_[2][1] * temperature;
}
CalphadDataType lmix3ACPhaseL(const double temperature)
{
return LmixACPhaseL_[3][0] + LmixACPhaseL_[3][1] * temperature;
}
CalphadDataType lmix0ACPhaseA(const double temperature)
{
return LmixACPhaseA_[0][0] + LmixACPhaseA_[0][1] * temperature;
}
CalphadDataType lmix1ACPhaseA(const double temperature)
{
return LmixACPhaseA_[1][0] + LmixACPhaseA_[1][1] * temperature;
}
CalphadDataType lmix2ACPhaseA(const double temperature)
{
return LmixACPhaseA_[2][0] + LmixACPhaseA_[2][1] * temperature;
}
CalphadDataType lmix3ACPhaseA(const double temperature)
{
return LmixACPhaseA_[3][0] + LmixACPhaseA_[3][1] * temperature;
}
CalphadDataType lmix0BCPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix0BCPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix0BCPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix1BCPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix1BCPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix1BCPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix2BCPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix2BCPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix2BCPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix3BCPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix3BCPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix3BCPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix0BCPhaseL(const double temperature)
{
return LmixBCPhaseL_[0][0] + LmixBCPhaseL_[0][1] * temperature;
}
CalphadDataType lmix1BCPhaseL(const double temperature)
{
return LmixBCPhaseL_[1][0] + LmixBCPhaseL_[1][1] * temperature;
}
CalphadDataType lmix2BCPhaseL(const double temperature)
{
return LmixBCPhaseL_[2][0] + LmixBCPhaseL_[2][1] * temperature;
}
CalphadDataType lmix3BCPhaseL(const double temperature)
{
return LmixBCPhaseL_[3][0] + LmixBCPhaseL_[3][1] * temperature;
}
CalphadDataType lmix0BCPhaseA(const double temperature)
{
return LmixBCPhaseA_[0][0] + LmixBCPhaseA_[0][1] * temperature;
}
CalphadDataType lmix1BCPhaseA(const double temperature)
{
return LmixBCPhaseA_[1][0] + LmixBCPhaseA_[1][1] * temperature;
}
CalphadDataType lmix2BCPhaseA(const double temperature)
{
return LmixBCPhaseA_[2][0] + LmixBCPhaseA_[2][1] * temperature;
}
CalphadDataType lmix3BCPhaseA(const double temperature)
{
return LmixBCPhaseA_[3][0] + LmixBCPhaseA_[3][1] * temperature;
}
// ABC (ternary interaction terms; only L0..L2 are modeled)
CalphadDataType lmix0ABCPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix0ABCPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix0ABCPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix1ABCPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix1ABCPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix1ABCPhaseA(temperature);
default:
return NAN;
}
}
CalphadDataType lmix2ABCPhase(const PhaseIndex pi, const double temperature)
{
switch (pi)
{
case PhaseIndex::phaseL:
return lmix2ABCPhaseL(temperature);
case PhaseIndex::phaseA:
return lmix2ABCPhaseA(temperature);
default:
return NAN;
}
}
// ABC liquid
CalphadDataType lmix0ABCPhaseL(const double temperature)
{
return LmixABCPhaseL_[0][0] + LmixABCPhaseL_[0][1] * temperature;
}
CalphadDataType lmix1ABCPhaseL(const double temperature)
{
return LmixABCPhaseL_[1][0] + LmixABCPhaseL_[1][1] * temperature;
}
CalphadDataType lmix2ABCPhaseL(const double temperature)
{
return LmixABCPhaseL_[2][0] + LmixABCPhaseL_[2][1] * temperature;
}
// ABC solid
CalphadDataType lmix0ABCPhaseA(const double temperature)
{
return LmixABCPhaseA_[0][0] + LmixABCPhaseA_[0][1] * temperature;
}
CalphadDataType lmix1ABCPhaseA(const double temperature)
{
return LmixABCPhaseA_[1][0] + LmixABCPhaseA_[1][1] * temperature;
}
CalphadDataType lmix2ABCPhaseA(const double temperature)
{
return LmixABCPhaseA_[2][0] + LmixABCPhaseA_[2][1] * temperature;
}
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif
// Free energies of both phases at (conc0, conc1) blended by hphi weights.
void computePhasesFreeEnergies(const double temperature,
const double* const hphi, const double conc0, const double conc1,
double& fl, double& fa);
};
void readLmixTernaryParameters(
boost::property_tree::ptree& Lmix_db, CalphadDataType LmixABC[3][2]);
}
#endif
|
scaling_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_SCALING_SOLVER_H_INCLUDED )
#define KRATOS_SCALING_SOLVER_H_INCLUDED
// System includes
#include <cmath>
#include <complex>
// External includes
// Project includes
#include "includes/define.h"
#include "factories/linear_solver_factory.h"
#include "linear_solvers/linear_solver.h"
#include "utilities/openmp_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
 * @class ScalingSolver
 * @ingroup KratosCore
 * @brief This solver rescales in order to improve the conditioning of the system
 * @details Rescales the matrix, and uses a given linear solver
 * @author Riccardo Rossi
 * @tparam TSparseSpaceType The sparse space definition
 * @tparam TDenseSpaceType The dense space definition
 * @tparam TReordererType The reorder considered
 */
template<class TSparseSpaceType, class TDenseSpaceType,
class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class ScalingSolver
: public LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ScalingSolver
KRATOS_CLASS_POINTER_DEFINITION(ScalingSolver);
/// Definition of the base type
typedef LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> BaseType;
/// The definition of the spaces (sparse matrix)
typedef typename TSparseSpaceType::MatrixType SparseMatrixType;
/// The definition of the spaces (vector)
typedef typename TSparseSpaceType::VectorType VectorType;
/// The definition of the spaces (dense matrix)
typedef typename TDenseSpaceType::MatrixType DenseMatrixType;
/// The definition of the linear solver factory type
typedef LinearSolverFactory<TSparseSpaceType,TDenseSpaceType> LinearSolverFactoryType;
/// The index type definition to be consistent
typedef typename TSparseSpaceType::IndexType IndexType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
ScalingSolver()
{
}
/**
 * @brief Constructor without parameters
 * @param pLinearSolver The linear solver to be scaled
 * @param SymmetricScaling If the scaling is symmetric (true by default)
 */
ScalingSolver(
typename BaseType::Pointer pLinearSolver,
const bool SymmetricScaling = true
) : BaseType (),
mpLinearSolver(pLinearSolver),
mSymmetricScaling(SymmetricScaling)
{
}
/**
 * @brief Constructor with parameters
 * @param ThisParameters The configuration parameters of the linear solver
 */
ScalingSolver(Parameters ThisParameters)
: BaseType ()
{
KRATOS_TRY
KRATOS_ERROR_IF_NOT(ThisParameters.Has("solver_type")) << "Solver_type must be specified to construct the ScalingSolver" << std::endl;
mpLinearSolver = LinearSolverFactoryType().Create(ThisParameters);
// "symmetric_scaling" is optional and defaults to true
mSymmetricScaling = ThisParameters.Has("symmetric_scaling") ? ThisParameters["symmetric_scaling"].GetBool() : true;
KRATOS_CATCH("")
}
/// Copy constructor.
ScalingSolver(const ScalingSolver& Other) : BaseType(Other) {}
/// Destructor.
~ScalingSolver() override {}
///@}
///@name Operators
///@{
/// Assignment operator.
ScalingSolver& operator=(const ScalingSolver& Other)
{
BaseType::operator=(Other);
return *this;
}
///@}
///@name Operations
///@{
/** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
 * when solving a mixed u-p problem, it is important to identify the row associated to v and p.
 * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
 * which require knowledge on the spatial position of the nodes associated to a given dof.
 * This function tells if the solver requires such data
 */
bool AdditionalPhysicalDataIsNeeded() override
{
return mpLinearSolver->AdditionalPhysicalDataIsNeeded();
}
/** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
 * when solving a mixed u-p problem, it is important to identify the row associated to v and p.
 * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
 * which require knowledge on the spatial position of the nodes associated to a given dof.
 * This function is the place to eventually provide such data
 */
void ProvideAdditionalData(
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB,
typename ModelPart::DofsArrayType& rdof_set,
ModelPart& r_model_part
) override
{
mpLinearSolver->ProvideAdditionalData(rA,rX,rB,rdof_set,r_model_part);
}
void InitializeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
mpLinearSolver->InitializeSolutionStep(rA,rX,rB);
}
/** This function is designed to be called at the end of the solve step.
 * for example this is the place to remove any data that we do not want to save for later
 @param rA. System matrix
 @param rX. Solution vector. it's also the initial guess for iterative linear solvers.
 @param rB. Right hand side vector.
*/
void FinalizeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
mpLinearSolver->FinalizeSolutionStep(rA,rX,rB);
}
/** This function is designed to clean up all internal data in the solver.
 * Clear is designed to leave the solver object as if newly created.
 * After a clear a new Initialize is needed
 */
void Clear() override
{
mpLinearSolver->Clear();
}
/** Normal solve method.
 Solves the linear system Ax=b and puts the result on SystemVector& rX.
 rX is also the initial guess for iterative methods.
 @param rA. System matrix
 @param rX. Solution vector. it's also the initial
 guess for iterative linear solvers.
 @param rB. Right hand side vector.
*/
bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
if(this->IsNotConsistent(rA, rX, rB))
return false;
VectorType scaling_vector(rX.size());
//obtain the scaling matrix (row 2-norms of A)
GetScalingWeights(rA,scaling_vector);
//scale system
if(mSymmetricScaling == false)
{
KRATOS_THROW_ERROR(std::logic_error,"not yet implemented","")
}
else
{
// NOTE(review): unqualified sqrt relies on <cmath> exposing ::sqrt —
// prefer std::sqrt; confirm before changing.
IndexPartition<std::size_t>(scaling_vector.size()).for_each([&](std::size_t Index){
scaling_vector[Index] = sqrt(std::abs(scaling_vector[Index]));
});
SymmetricScaling(rA,scaling_vector);
}
//scale RHS
IndexPartition<std::size_t>(scaling_vector.size()).for_each([&](std::size_t Index){
rB[Index] /= scaling_vector[Index];
});
//solve the problem
bool is_solved = mpLinearSolver->Solve(rA,rX,rB);
//backscale the solution
if(mSymmetricScaling == true)
{
IndexPartition<std::size_t>(scaling_vector.size()).for_each([&](std::size_t Index){
rX[Index] /= scaling_vector[Index];
});
}
return is_solved;
}
///@}
///@name Access
///@{
IndexType GetIterationsNumber() override
{
return mpLinearSolver->GetIterationsNumber();
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "Composite Linear Solver. Uses internally the following linear solver " << mpLinearSolver->Info();
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
BaseType::PrintData(rOStream);
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// The wrapped solver that solves the rescaled system
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpLinearSolver;
/// If true, scale rows AND columns by sqrt of the row norms
bool mSymmetricScaling;
///@}
///@name Private Operators
///@{
// Scales A in place: each entry A(i,j) is divided by aux[i]*aux[j],
// with the rows split across OpenMP threads.
static void SymmetricScaling( SparseMatrixType& A, const VectorType& aux)
{
//typedef unsigned int size_type;
//typedef double value_type;
//create partition
OpenMPUtils::PartitionVector partition;
int number_of_threads = ParallelUtilities::GetNumThreads();
OpenMPUtils::DivideInPartitions(A.size1(),number_of_threads, partition);
//parallel loop
#pragma omp parallel
{
int thread_id = OpenMPUtils::ThisThread();
int number_of_rows = partition[thread_id+1] - partition[thread_id];
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator row_iter_begin = A.index1_data().begin()+partition[thread_id];
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator index_2_begin = A.index2_data().begin()+*row_iter_begin;
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::iterator value_begin = A.value_data().begin()+*row_iter_begin;
perform_matrix_scaling( number_of_rows,
row_iter_begin,
index_2_begin,
value_begin,
partition[thread_id],
aux
);
}
}
/**
 * divides each entry A(i,j) of this thread's row range by
 * weights[i]*weights[j] (in-place, CSR iterators)
 */
static void perform_matrix_scaling(
int number_of_rows,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator row_begin,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator index2_begin,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::iterator value_begin,
unsigned int output_begin_index,
const VectorType& weights
)
{
int row_size;
typename SparseMatrixType::index_array_type::const_iterator row_it = row_begin;
int kkk = output_begin_index;
for(int k = 0; k < number_of_rows; k++)
{
row_size= *(row_it+1)-*row_it;
row_it++;
const typename TDenseSpaceType::DataType row_weight = weights[kkk++];
for(int i = 0; i<row_size; i++)
{
const typename TDenseSpaceType::DataType col_weight = weights[*index2_begin];
typename TDenseSpaceType::DataType t = (*value_begin);
t /= (row_weight*col_weight);
(*value_begin) = t; //check if this is correct!!
value_begin++;
index2_begin++;
}
}
}
// Fills 'aux' with the Euclidean (2-)norm of each row of A, computed in
// parallel over row partitions.
static void GetScalingWeights( const SparseMatrixType& A, VectorType& aux)
{
//typedef unsigned int size_type;
//typedef double value_type;
//create partition
OpenMPUtils::PartitionVector partition;
int number_of_threads = ParallelUtilities::GetNumThreads();
OpenMPUtils::DivideInPartitions(A.size1(),number_of_threads, partition);
//parallel loop
#pragma omp parallel
{
int thread_id = OpenMPUtils::ThisThread();
int number_of_rows = partition[thread_id+1] - partition[thread_id];
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator row_iter_begin = A.index1_data().begin()+partition[thread_id];
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator index_2_begin = A.index2_data().begin()+*row_iter_begin;
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::const_iterator value_begin = A.value_data().begin()+*row_iter_begin;
GS2weights( number_of_rows,
row_iter_begin,
index_2_begin,
value_begin,
partition[thread_id],
aux
);
}
}
/**
 * computes the 2-norm of each row in this thread's row range and stores it
 * into weights[row]
 */
static void GS2weights(
int number_of_rows,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator row_begin,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator index2_begin,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::const_iterator value_begin,
unsigned int output_begin_index,
VectorType& weights
)
{
int row_size;
typename SparseMatrixType::index_array_type::const_iterator row_it = row_begin;
int kkk = output_begin_index;
for(int k = 0; k < number_of_rows; k++)
{
row_size= *(row_it+1)-*row_it;
row_it++;
double t = 0.0;
for(int i = 0; i<row_size; i++)
{
double tmp = std::abs(*value_begin);
t += tmp*tmp;
value_begin++;
}
// NOTE(review): unqualified sqrt — prefer std::sqrt (see Solve()).
t = sqrt(t);
weights[kkk++] = t;
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class ScalingSolver
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
// NOTE(review): TPreconditionerType is declared but not used by ScalingSolver
// (which takes three template arguments); it cannot be deduced from the
// arguments here — confirm intended usage before relying on this operator.
template<class TSparseSpaceType, class TDenseSpaceType,
class TPreconditionerType,
class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
ScalingSolver<TSparseSpaceType, TDenseSpaceType,
TReordererType>& rThis)
{
// nothing to read; returns the stream unchanged
return IStream;
}
/// output stream function
// Prints the solver's info line followed by its data.
template<class TSparseSpaceType, class TDenseSpaceType,
class TPreconditionerType,
class TReordererType>
inline std::ostream& operator << (std::ostream& OStream,
const ScalingSolver<TSparseSpaceType, TDenseSpaceType,
TReordererType>& rThis)
{
rThis.PrintInfo(OStream);
OStream << std::endl;
rThis.PrintData(OStream);
return OStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_SCALING_SOLVER_H_INCLUDED defined
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * The result is normalized so that 0 <= result->tv_usec < 1000000 and
 * result->tv_sec carries the sign (floor semantics), matching the classic
 * GNU libc manual example.  Unlike that example, this version does NOT
 * modify *y as a hidden side effect.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Do the arithmetic on one 64-bit microsecond count to avoid the
     * error-prone manual carry/borrow logic (and the mutation of *y). */
    long long diff = ((long long)x->tv_sec - (long long)y->tv_sec) * 1000000LL
        + ((long long)x->tv_usec - (long long)y->tv_usec);

    result->tv_sec = diff / 1000000LL;
    result->tv_usec = diff % 1000000LL;
    /* C division truncates toward zero; renormalize to floor semantics so
     * tv_usec is always in [0, 1000000). */
    if (result->tv_usec < 0)
    {
        result->tv_usec += 1000000;
        result->tv_sec -= 1;
    }
    /* Return 1 if result is negative. */
    return diff < 0;
}
/* Abort-on-failure allocator: the benchmark cannot run without its grids,
 * so out-of-memory is fatal by policy. */
static void *xmalloc(size_t n)
{
    void *p = malloc(n);
    if (p == NULL) {
        fprintf(stderr, "out of memory (%zu bytes)\n", n);
        exit(EXIT_FAILURE);
    }
    return p;
}

/*
 * Driver for the order-4 (25-point) 3D wave stencil benchmark.
 * Usage: 3d25pt [Nx Ny Nz [Nt]]  — interior sizes; a 4-deep halo is added
 * on every side (hence the +8 below). Runs the kernel TESTS times and
 * reports the per-run and minimum wall-clock time.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;

    /* Default problem size so the run is well-defined without arguments.
     * The original left Nx/Ny/Nz/Nt uninitialized in that case (UB). */
    int Nx = 16 + 8, Ny = 16 + 8, Nz = 16 + 8, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1]) + 8;
        Ny = atoi(argv[2]) + 8;
        Nz = atoi(argv[3]) + 8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Two time planes A[0]/A[1] plus the coefficient field roc2, each an
     * Nz x Ny x Nx pointer-based 3D array. roc2 is allocated once with its
     * final size; the original leaked a one-element stub allocation here. */
    double ****A = xmalloc(sizeof(double ***) * 2);
    double ***roc2 = xmalloc(sizeof(double **) * Nz);
    A[0] = xmalloc(sizeof(double **) * Nz);
    A[1] = xmalloc(sizeof(double **) * Nz);
    for (i = 0; i < Nz; i++) {
        A[0][i] = xmalloc(sizeof(double *) * Ny);
        A[1][i] = xmalloc(sizeof(double *) * Ny);
        roc2[i] = xmalloc(sizeof(double *) * Ny);
        for (j = 0; j < Ny; j++) {
            A[0][i][j] = xmalloc(sizeof(double) * Nx);
            A[1][i][j] = xmalloc(sizeof(double) * Nx);
            roc2[i][j] = xmalloc(sizeof(double) * Nx);
        }
    }

    /* Tile size information, including extra element to decide the list
     * length. The list is modified here before source-to-source
     * transformations; grow it with a checked realloc (never overwrite the
     * original pointer directly). */
    int *tile_size = xmalloc(sizeof(int));
    tile_size[0] = -1;
    int *resized = (int *) realloc(tile_size, sizeof(int) * 5);
    if (resized == NULL) {
        fprintf(stderr, "out of memory\n");
        exit(EXIT_FAILURE);
    }
    tile_size = resized;
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 8;
    tile_size[3] = 2048;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;

    /* Initialize the full grid, halo included: at i==4 the stencil reads
     * index i-4 == 0, and at t==0 it reads both time planes, so every cell
     * of A[0], A[1] and roc2 needs a defined value. The original skipped
     * index 0 and never touched plane A[1] (uninitialized reads). */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0; /* quiescent previous time plane */
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* Order-4 finite-difference coefficients of the 25-point stencil. */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        /* Leapfrog time stepping: the (t+1) plane is overwritten in place
         * from the t plane and the previous value it still holds. */
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    (void) ts_return;    /* timing difference sign is not needed here */
    (void) num_threads;  /* reported by PRINT_RESULTS builds that use it */

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free allocated arrays (deepest level first). */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);          /* the original leaked the top-level array ... */
    free(roc2);
    free(tile_size);  /* ... and the tile-size list */
    return 0;
}
|
Stmt.h | //===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <string>
namespace llvm {
class FoldingSetNodeID;
}
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class IdentifierInfo;
class LabelDecl;
class ParmVarDecl;
class PrinterHelper;
struct PrintingPolicy;
class QualType;
class RecordDecl;
class SourceManager;
class StringLiteral;
class SwitchStmt;
class Token;
class VarDecl;
//===--------------------------------------------------------------------===//
// ExprIterator - Iterators for iterating over Stmt* arrays that contain
// only Expr*. This is needed because AST nodes use Stmt* arrays to store
// references to children (to be compatible with StmtIterator).
//===--------------------------------------------------------------------===//
class Stmt;
class Expr;
/// Forward iterator over a Stmt** array whose elements are known to be Expr*.
/// Wraps the raw Stmt** position; the dereference operators (defined
/// out-of-line) perform the Stmt* -> Expr* cast.
class ExprIterator : public std::iterator<std::forward_iterator_tag,
                                          Expr *&, ptrdiff_t,
                                          Expr *&, Expr *&> {
  Stmt** I; // current position in the underlying Stmt* array
public:
  ExprIterator(Stmt** i) : I(i) {}
  ExprIterator() : I(nullptr) {}
  ExprIterator& operator++() { ++I; return *this; }
  // Pointer-style arithmetic; relies on the implicit Stmt** constructor.
  ExprIterator operator-(size_t i) { return I-i; }
  ExprIterator operator+(size_t i) { return I+i; }
  Expr* operator[](size_t idx);
  // FIXME: Verify that this will correctly return a signed distance.
  signed operator-(const ExprIterator& R) const { return I - R.I; }
  Expr* operator*() const;
  Expr* operator->() const;
  bool operator==(const ExprIterator& R) const { return I == R.I; }
  bool operator!=(const ExprIterator& R) const { return I != R.I; }
  bool operator>(const ExprIterator& R) const { return I > R.I; }
  bool operator>=(const ExprIterator& R) const { return I >= R.I; }
};
/// Const counterpart of ExprIterator: iterates a const Stmt* array whose
/// elements are known to be Expr*, yielding const Expr* on dereference.
class ConstExprIterator : public std::iterator<std::forward_iterator_tag,
                                               const Expr *&, ptrdiff_t,
                                               const Expr *&,
                                               const Expr *&> {
  const Stmt * const *I; // current position in the underlying array
public:
  ConstExprIterator(const Stmt * const *i) : I(i) {}
  ConstExprIterator() : I(nullptr) {}
  ConstExprIterator& operator++() { ++I; return *this; }
  // Pointer-style arithmetic; relies on the implicit converting constructor.
  ConstExprIterator operator+(size_t i) const { return I+i; }
  ConstExprIterator operator-(size_t i) const { return I-i; }
  const Expr * operator[](size_t idx) const;
  signed operator-(const ConstExprIterator& R) const { return I - R.I; }
  const Expr * operator*() const;
  const Expr * operator->() const;
  bool operator==(const ConstExprIterator& R) const { return I == R.I; }
  bool operator!=(const ConstExprIterator& R) const { return I != R.I; }
  bool operator>(const ConstExprIterator& R) const { return I > R.I; }
  bool operator>=(const ConstExprIterator& R) const { return I >= R.I; }
};
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt {
public:
  /// Kind discriminator with one value per concrete statement class,
  /// generated from StmtNodes.inc; the first/last*Constant markers bracket
  /// each abstract base's concrete descendants for range-based classof.
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
        first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
        first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
  void* operator new(size_t bytes) throw() {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }
  void operator delete(void* data) throw() {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  // The *Bitfields classes below all overlay the same storage (see the
  // anonymous union at the end); each starts with an anonymous field that
  // skips the bits used by its base class.
  class StmtBitfields {
    friend class Stmt;

    /// \brief The statement class.
    unsigned sClass : 8;
  };
  enum { NumStmtBits = 8 };

  class CompoundStmtBitfields {
    friend class CompoundStmt;
    unsigned : NumStmtBits;

    unsigned NumStmts : 32 - NumStmtBits;
  };

  class ExprBitfields {
    friend class Expr;
    friend class DeclRefExpr; // computeDependence
    friend class InitListExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class ASTStmtReader; // deserialization
    friend class CXXNewExpr; // ctor
    friend class DependentScopeDeclRefExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CallExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ShuffleVectorExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class AtomicExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 2;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  enum { NumExprBits = 16 };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;
    unsigned : NumExprBits;

    unsigned Kind : 2;
  };

  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;
    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;
    unsigned : NumExprBits;

    unsigned Kind : 2;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class DeclRefExprBitfields {
    friend class DeclRefExpr;
    friend class ASTStmtReader; // deserialization
    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
  };

  class CastExprBitfields {
    friend class CastExpr;
    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned BasePathSize : 32 - 6 - NumExprBits;
  };

  class CallExprBitfields {
    friend class CallExpr;
    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;
  };

  class ExprWithCleanupsBitfields {
    friend class ExprWithCleanups;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    unsigned NumObjects : 32 - NumExprBits;
  };

  class PseudoObjectExprBitfields {
    friend class PseudoObjectExpr;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;
    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class TypeTraitExprBitfields {
    friend class TypeTraitExpr;
    friend class ASTStmtReader;
    friend class ASTStmtWriter;

    unsigned : NumExprBits;

    /// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
    unsigned Kind : 8;

    /// \brief If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// \brief The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  // Single word of storage shared by all the bitfield views above; which
  // member is active depends on the dynamic class of the statement.
  union {
    StmtBitfields StmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    ExprBitfields ExprBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    CastExprBitfields CastExprBits;
    CallExprBitfields CallExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
    InitListExprBitfields InitListExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
  };

  friend class ASTStmtReader;
  friend class ASTStmtWriter;

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  void* operator new(size_t bytes, void* mem) throw() {
    return mem;
  }

  // Matching placement deletes, called only if a constructor throws.
  void operator delete(void*, const ASTContext&, unsigned) throw() { }
  void operator delete(void*, const ASTContext*, unsigned) throw() { }
  void operator delete(void*, size_t) throw() { }
  void operator delete(void*, void*) throw() { }

public:
  /// \brief A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell { };

private:
  /// \brief Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// \brief Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    // Stmts are pointer-aligned so derived classes can pack data into
    // low pointer bits; verify the layout holds.
    static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }
  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getLocStart() const LLVM_READONLY;
  SourceLocation getLocEnd() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \brief Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy,
                   unsigned Indentation = 0) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();

  /// \brief Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  typedef StmtIterator child_iterator;
  typedef ConstStmtIterator const_child_iterator;

  typedef StmtRange child_range;
  typedef ConstStmtRange const_child_range;

  child_range children();
  const_child_range children() const {
    return const_cast<Stmt*>(this)->children();
  }

  child_iterator child_begin() { return children().first; }
  child_iterator child_end() { return children().second; }

  const_child_iterator child_begin() const { return children().first; }
  const_child_iterator child_end() const { return children().second; }

  /// \brief Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
///
class DeclStmt : public Stmt {
  DeclGroupRef DG;                 // the declaration(s) this statement carries
  SourceLocation StartLoc, EndLoc; // source extent of the statement
public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc,
           SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg),
                                    StartLoc(startLoc), EndLoc(endLoc) {}

  /// \brief Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { }

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const {
    return DG.isSingleDecl();
  }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  SourceLocation getStartLoc() const { return StartLoc; }
  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  // decl_iterator walks the declarations themselves rather than child Stmts.
  typedef DeclGroupRef::iterator decl_iterator;
  typedef DeclGroupRef::const_iterator const_decl_iterator;
  typedef llvm::iterator_range<decl_iterator> decl_range;
  typedef llvm::iterator_range<const_decl_iterator> decl_const_range;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }
  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator;
  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }
  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
  SourceLocation SemiLoc; // location of the bare semicolon

  /// \brief True if the null statement was preceded by an empty macro, e.g:
  /// @code
  ///   #define CALL(x)
  ///   CALL(0);
  /// @endcode
  bool HasLeadingEmptyMacro;
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
    : Stmt(NullStmtClass), SemiLoc(L),
      HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}

  /// \brief Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty),
                                        HasLeadingEmptyMacro(false) { }

  SourceLocation getSemiLoc() const { return SemiLoc; }
  void setSemiLoc(SourceLocation L) { SemiLoc = L; }

  bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }

  SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() { return child_range(); }

  friend class ASTStmtReader;
  friend class ASTStmtWriter;
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
class CompoundStmt : public Stmt {
  Stmt** Body;                         // ASTContext-allocated array of size()
                                       // statements (see setStmts)
  SourceLocation LBraceLoc, RBraceLoc; // brace locations

  friend class ASTStmtReader;

public:
  CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
               SourceLocation LB, SourceLocation RB);

  // \brief Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
    : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
  }

  // \brief Build an empty compound statement.
  explicit CompoundStmt(EmptyShell Empty)
    : Stmt(CompoundStmtClass, Empty), Body(nullptr) {
    CompoundStmtBits.NumStmts = 0;
  }

  void setStmts(const ASTContext &C, Stmt **Stmts, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  typedef Stmt** body_iterator;
  typedef llvm::iterator_range<body_iterator> body_range;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return Body; }
  body_iterator body_end() { return Body + size(); }
  Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; }
  Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    Body[size()-1] = S;
  }

  typedef Stmt* const * const_body_iterator;
  typedef llvm::iterator_range<const_body_iterator> body_const_range;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }
  const_body_iterator body_begin() const { return Body; }
  const_body_iterator body_end() const { return Body + size(); }
  const Stmt *body_front() const {
    return !body_empty() ? Body[0] : nullptr;
  }
  const Stmt *body_back() const {
    return !body_empty() ? Body[size() - 1] : nullptr;
  }

  typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }
  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  typedef std::reverse_iterator<const_body_iterator>
          const_reverse_body_iterator;
  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }
  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }

  const_child_range children() const {
    // NOTE(review): returns a child_range from a const_child_range function —
    // presumably relies on an implicit StmtRange -> ConstStmtRange conversion;
    // verify against the StmtIterator definitions.
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
  SwitchCase *NextSwitchCase;
  SourceLocation KeywordLoc; // 'case' or 'default' keyword location
  SourceLocation ColonLoc;   // trailing ':' location

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
    : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {
  }

  SwitchCase(StmtClass SC, EmptyShell)
    : Stmt(SC), NextSwitchCase(nullptr) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }

  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }

  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Dispatches to the concrete subclass (defined out-of-line).
  Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase*>(this)->getSubStmt();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};
/// CaseStmt - Represents a 'case' label (including the GNU case-range
/// extension) together with the statement it labels.
class CaseStmt : public SwitchCase {
  SourceLocation EllipsisLoc; // '...' location for GNU case ranges
  enum { LHS, RHS, SUBSTMT, END_EXPR };
  Stmt* SubExprs[END_EXPR];  // The expression for the RHS is Non-null for
                             // GNU "case 1 ... 4" extension
public:
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
    : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    SubExprs[SUBSTMT] = nullptr;
    SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
    SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
    EllipsisLoc = ellipsisLoc;
  }

  /// \brief Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { }

  SourceLocation getCaseLoc() const { return KeywordLoc; }
  void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
  void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
  Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
  Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }
  const Expr *getLHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[LHS]);
  }
  const Expr *getRHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[RHS]);
  }
  const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }

  void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
  void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
  void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
  }
};
/// DefaultStmt - Represents a 'default:' label and the statement it labels.
class DefaultStmt : public SwitchCase {
  Stmt* SubStmt; // statement following the 'default:' label
public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) :
    SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// \brief Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
    : SwitchCase(DefaultStmtClass, Empty) { }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return KeywordLoc; }
  void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};
/// Out-of-line because it needs the complete CaseStmt/DefaultStmt types.
/// A SwitchCase is always one of exactly those two concrete classes.
inline SourceLocation SwitchCase::getLocEnd() const {
  if (isa<CaseStmt>(this))
    return cast<CaseStmt>(this)->getLocEnd();
  return cast<DefaultStmt>(this)->getLocEnd();
}
/// LabelStmt - Represents a label, which has a substatement.  For example:
///    foo: return;
///
class LabelStmt : public Stmt {
  SourceLocation IdentLoc; // location of the label identifier
  LabelDecl *TheDecl;      // the declaration this label refers to
  Stmt *SubStmt;           // the labeled statement
public:
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
    // Guard against accidental growth: LabelStmt is size-sensitive.
    static_assert(sizeof(LabelStmt) ==
                      2 * sizeof(SourceLocation) + 2 * sizeof(void *),
                  "LabelStmt too big");
  }

  // \brief Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { }

  SourceLocation getIdentLoc() const { return IdentLoc; }
  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }
  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setIdentLoc(SourceLocation L) { IdentLoc = L; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt+1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
///
/// The Attr* array is allocated immediately after the object itself
/// (trailing storage), hence the factory Create/CreateEmpty functions.
class AttributedStmt : public Stmt {
  Stmt *SubStmt;         // the statement the attributes apply to
  SourceLocation AttrLoc; // location of the attribute specifier
  unsigned NumAttrs;      // number of trailing Attr* entries

  friend class ASTStmtReader;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
    : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
      NumAttrs(Attrs.size()) {
    memcpy(getAttrArrayPtr(), Attrs.data(), Attrs.size() * sizeof(Attr *));
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
    : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
    memset(getAttrArrayPtr(), 0, NumAttrs * sizeof(Attr *));
  }

  // Trailing storage accessors: the array lives directly after *this.
  Attr *const *getAttrArrayPtr() const {
    return reinterpret_cast<Attr *const *>(this + 1);
  }
  Attr **getAttrArrayPtr() { return reinterpret_cast<Attr **>(this + 1); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr*> Attrs, Stmt *SubStmt);

  // \brief Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttrLoc; }
  ArrayRef<const Attr*> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
  }
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
  enum { VAR, COND, THEN, ELSE, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // VAR holds the faux DeclStmt for a condition
                            // variable (or null); ELSE may be null.

  SourceLocation IfLoc;
  SourceLocation ElseLoc;

public:
  IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
         Stmt *then, SourceLocation EL = SourceLocation(),
         Stmt *elsev = nullptr);

  /// \brief Build an empty if/then/else statement
  explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  const Stmt *getThen() const { return SubExprs[THEN]; }
  void setThen(Stmt *S) { SubExprs[THEN] = S; }
  const Stmt *getElse() const { return SubExprs[ELSE]; }
  void setElse(Stmt *S) { SubExprs[ELSE] = S; }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Stmt *getThen() { return SubExprs[THEN]; }
  Stmt *getElse() { return SubExprs[ELSE]; }

  SourceLocation getIfLoc() const { return IfLoc; }
  void setIfLoc(SourceLocation L) { IfLoc = L; }
  SourceLocation getElseLoc() const { return ElseLoc; }
  void setElseLoc(SourceLocation L) { ElseLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    if (SubExprs[ELSE])
      return SubExprs[ELSE]->getLocEnd();
    else
      return SubExprs[THEN]->getLocEnd();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
  SourceLocation SwitchLoc;
  // Sub-statement slots: optional condition-variable DeclStmt, condition
  // expression, and switch body.
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  // This points to a linked list of case and default statements and, if the
  // SwitchStmt is a switch on an enum value, records whether all the enum
  // values were covered by CaseStmts.  The coverage information value is meant
  // to be a hint for possible clients.
  llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;

public:
  SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond);

  /// \brief Build an empty switch statement (used by deserialization).
  explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Stmt *getBody() const { return SubExprs[BODY]; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }
  SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }

  /// \brief Set the case list for this switch statement.
  void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }

  SourceLocation getSwitchLoc() const { return SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    SubExprs[BODY] = S;
    SwitchLoc = SL;
  }

  // Prepends SC to the case list; cases are therefore stored in reverse
  // source order.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase()
           && "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase.getPointer());
    FirstCase.setPointer(SC);
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { FirstCase.setInt(true); }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }

  SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd();
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
  SourceLocation WhileLoc;
  // Sub-statement slots: optional condition-variable DeclStmt, condition
  // expression, and loop body.
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

public:
  WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
            SourceLocation WL);

  /// \brief Build an empty while statement (used by deserialization).
  explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};
/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
  SourceLocation DoLoc;
  // Sub-statement slots: loop body and condition expression.  Note that,
  // unlike WhileStmt, a do/while cannot declare a condition variable.
  enum { BODY, COND, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc;  // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
    : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
    SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
    SubExprs[BODY] = body;
  }

  /// \brief Build an empty do-while statement (used by deserialization).
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getDoLoc() const { return DoLoc; }
  void setDoLoc(SourceLocation L) { DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
///
class ForStmt : public Stmt {
  SourceLocation ForLoc;
  // Sub-statement slots: init statement, optional condition-variable
  // DeclStmt, condition expression, increment expression, and loop body.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];  // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// \brief Build an empty for statement (used by deserialization).
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }

  Stmt *getInit() { return SubExprs[INIT]; }

  /// \brief Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForLoc; }
  void setForLoc(SourceLocation L) { ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto (to a known label).
///
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation GotoLoc;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
    : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}

  /// \brief Build an empty goto statement (used by deserialization).
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { }

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements.
  child_range children() { return child_range(); }
};
/// IndirectGotoStmt - This represents an indirect goto (GNU "goto *expr").
///
class IndirectGotoStmt : public Stmt {
  SourceLocation GotoLoc;
  SourceLocation StarLoc;
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
    : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
      Target((Stmt*)target) {}

  /// \brief Build an empty indirect goto statement (used by deserialization).
  explicit IndirectGotoStmt(EmptyShell Empty)
    : Stmt(IndirectGotoStmtClass, Empty) { }

  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
  const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target+1); }
};
/// ContinueStmt - This represents a continue statement.
///
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc;

public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// \brief Build an empty continue statement (used by deserialization).
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { }

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: a continue statement has no sub-statements.
  child_range children() { return child_range(); }
};
/// BreakStmt - This represents a break statement.
///
class BreakStmt : public Stmt {
  SourceLocation BreakLoc;

public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
    // Guard against accidental growth: a BreakStmt should stay as small as
    // its Stmt base plus a single SourceLocation (plus padding).
    static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
                  "BreakStmt too large");
  }

  /// \brief Build an empty break statement (used by deserialization).
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { }

  SourceLocation getBreakLoc() const { return BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: a break statement has no sub-statements.
  child_range children() { return child_range(); }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
///
class ReturnStmt : public Stmt {
  SourceLocation RetLoc;
  Stmt *RetExpr;  // Null for a bare "return;".
  const VarDecl *NRVOCandidate;

public:
  // Delegates to the full constructor with no return value and no NRVO
  // candidate.
  explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}

  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
      : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
        NRVOCandidate(NRVOCandidate) {}

  /// \brief Build an empty return expression (used by deserialization).
  explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }

  const Expr *getRetValue() const;
  Expr *getRetValue();
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }

  SourceLocation getReturnLoc() const { return RetLoc; }
  void setReturnLoc(SourceLocation L) { RetLoc = L; }

  /// \brief Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
  void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }

  SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return RetExpr ? RetExpr->getLocEnd() : RetLoc;
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators
  child_range children() {
    if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
    return child_range();
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
class AsmStmt : public Stmt {
protected:
  SourceLocation AsmLoc;

  /// \brief True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// \brief If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions, laid out as outputs followed by inputs.
  Stmt **Exprs;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers) :
    Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
    NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { }

  friend class ASTStmtReader;

public:
  /// \brief Build an empty inline-assembly statement (used by
  /// deserialization).
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) :
    Stmt(SC, Empty), Exprs(nullptr) { }

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Placeholder locations; GCCAsmStmt and MSAsmStmt shadow these with
  // real source locations.
  SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.  Inputs are stored in Exprs after the outputs.

  typedef ExprIterator inputs_iterator;
  typedef ConstExprIterator const_inputs_iterator;
  typedef llvm::iterator_range<inputs_iterator> inputs_range;
  typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.  Outputs occupy the front of Exprs.

  typedef ExprIterator outputs_iterator;
  typedef ConstExprIterator const_outputs_iterator;
  typedef llvm::iterator_range<outputs_iterator> outputs_range;
  typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};
/// This represents a GCC inline-assembly statement extension.
///
class GCCAsmStmt : public AsmStmt {
  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  // Constraints and Names cover outputs first, then inputs.
  StringLiteral **Constraints;
  StringLiteral **Clobbers;
  IdentifierInfo **Names;

  friend class ASTStmtReader;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, SourceLocation rparenloc);

  /// \brief Build an empty inline-assembly statement (used by
  /// deserialization).
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty),
    Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };
  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;
  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
      : MyKind(Operand), Str(S), OperandNo(OpNo),
        Range(CharSourceRange::getCharRange(Begin, End)) {
    }

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const {
      return Str;
    }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// a nonzero value (with \p DiagOffs set), otherwise return zero.  This
  /// handles canonicalization and translation of strings from GCC syntax to
  /// LLVM IR syntax, and handles flattening of named references like %[foo]
  /// to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const {
    return Names[i];
  }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Input operands are stored after the NumOutputs output operands.

  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);
public:

  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;
  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};
/// This represents a Microsoft inline-assembly statement extension.
///
class MSAsmStmt : public AsmStmt {
  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks;

  Token *AsmToks;
  // Constraints covers outputs first, then inputs.
  StringRef *Constraints;
  StringRef *Clobbers;

  friend class ASTStmtReader;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// \brief Build an empty MS-style inline-assembly statement (used by
  /// deserialization).
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty),
    NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { }

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // A braced __asm block has a valid '{' location; a single-statement
  // __asm does not.
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }
  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }
  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};
/// Represents a Microsoft SEH '__except' handler (filter expression plus
/// handler block).
class SEHExceptStmt : public Stmt {
  SourceLocation Loc;
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc,
                Expr *FilterExpr,
                Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { }

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};
/// Represents a Microsoft SEH '__finally' block.
class SEHFinallyStmt : public Stmt {
  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc,
                 Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { }

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getLocEnd(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};
/// Represents a Microsoft SEH '__try' statement together with its handler,
/// which is either a SEHExceptStmt or a SEHFinallyStmt.
class SEHTryStmt : public Stmt {
  bool IsCXXTry;
  SourceLocation TryLoc;
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry,  // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { }

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};
/// Represents a __leave statement.
///
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// \brief Build an empty __leave statement (used by deserialization).
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { }

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators: a __leave statement has no sub-statements.
  child_range children() { return child_range(); }
};
/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// \brief The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_VLAType,
  };

  /// \brief Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // Packs the captured VarDecl (null for 'this'/VLA captures) together with
    // the 2-bit capture kind.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;
  public:
    /// \brief Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    ///
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr)
      : VarAndKind(Var, Kind), Loc(Loc) {
      // Enforce the Kind/Var invariant: only VCK_ByRef carries a variable.
      switch (Kind) {
      case VCK_This:
        assert(!Var && "'this' capture cannot have a variable!");
        break;
      case VCK_ByRef:
        assert(Var && "capturing by reference must have a variable!");
        break;
      case VCK_VLAType:
        assert(!Var &&
               "Variable-length array type capture cannot have a variable!");
        break;
      }
    }

    /// \brief Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); }

    /// \brief Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// \brief Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// \brief Determine whether this capture handles a variable.
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// \brief Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// \brief Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const {
      assert(capturesVariable() &&
             "No variable available for 'this' or VAT capture");
      return VarAndKind.getPointer();
    }
    friend class ASTStmtReader;
  };

private:
  /// \brief The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// \brief The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;

  /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl;

  /// \brief Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// \brief Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Child statements live in trailing storage immediately after this object:
  // NumCaptures capture-initializer expressions followed by the captured
  // statement itself (stored at index NumCaptures; see setCapturedStmt and
  // capture_init_begin/end below).
  Stmt **getStoredStmts() const {
    return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// \brief Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const {
    return const_cast<CapturedStmt *>(this)->getCapturedStmt();
  }

  /// \brief Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); }
  const CapturedDecl *getCapturedDecl() const {
    return const_cast<CapturedStmt *>(this)->getCapturedDecl();
  }

  /// \brief Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D) {
    assert(D && "null CapturedDecl");
    CapDeclAndKind.setPointer(D);
  }

  /// \brief Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const {
    return CapDeclAndKind.getInt();
  }

  /// \brief Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind) {
    CapDeclAndKind.setInt(Kind);
  }

  /// \brief Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// \brief Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// \brief True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// \brief An iterator that walks over the captures.
  typedef Capture *capture_iterator;
  typedef const Capture *const_capture_iterator;
  typedef llvm::iterator_range<capture_iterator> capture_range;
  typedef llvm::iterator_range<const_capture_iterator> capture_const_range;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// \brief Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// \brief Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  // NOTE: declared const yet returns a mutable iterator, mirroring the
  // const getStoredCaptures() helper above.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// \brief Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// \brief Iterator that walks over the capture initialization arguments.
  typedef Expr **capture_init_iterator;
  typedef llvm::iterator_range<capture_init_iterator> capture_init_range;

  capture_init_range capture_inits() const {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// \brief Retrieve the first initialization argument.
  // The initializers are the first NumCaptures entries of the trailing
  // statement array (see getStoredStmts).
  capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  /// \brief Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // Source range is delegated entirely to the captured statement.
  SourceLocation getLocStart() const LLVM_READONLY {
    return getCapturedStmt()->getLocStart();
  }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return getCapturedStmt()->getLocEnd();
  }
  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();

  friend class ASTStmtReader;
};
} // end namespace clang
#endif
|
problem.p4.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
//double r2xx = 2.0;
//double r2yy = 2.0;
//double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
//double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
//double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
//double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
//------------------------------------------------------------------------------------------------------------------------------
// Evaluate the analytic solution u and its first/second partial derivatives.
// The solution is separable:  u(x,y,z) = v(x)*v(y)*v(z)  with
//   v(w)   = w^4 - 2w^3 + w^2 + shift
//   v'(w)  = 4w^3 - 6w^2 + 2w
//   v''(w) = 12w^2 - 12w + 2
// should be continuous in u, u', and u''
// If Periodic, then the integral of the RHS should sum to zero.
//   Setting shift=-1/30 ensures the integral of v over [0,1] is zero, which
//   should(?) make the integrals of u,ux,uy,uz,uxx,uyy,uzz (and hence f) zero.
// If Dirichlet, then v(0)=v(1)=0.0, so shift=0 makes U zero on the boundary.
// Rewritten with Horner-form polynomials instead of pow(); algebraically
// identical to the original (results agree to floating-point rounding) and
// avoids nine pow() calls per evaluation.
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
  double shift = 0.0;if(isPeriodic)shift= -1.0/30.0;
  // v(w) = ((w-2)w + 1)w^2 + shift  ==  w^4 - 2w^3 + w^2 + shift
  double X   = ((x - 2.0)*x + 1.0)*(x*x) + shift;
  double Y   = ((y - 2.0)*y + 1.0)*(y*y) + shift;
  double Z   = ((z - 2.0)*z + 1.0)*(z*z) + shift;
  // v'(w) = ((4w-6)w + 2)w  ==  4w^3 - 6w^2 + 2w
  double Xx  = ((4.0*x - 6.0)*x + 2.0)*x;
  double Yy  = ((4.0*y - 6.0)*y + 2.0)*y;
  double Zz  = ((4.0*z - 6.0)*z + 2.0)*z;
  // v''(w) = (12w-12)w + 2  ==  12w^2 - 12w + 2
  double Xxx = (12.0*x - 12.0)*x + 2.0;
  double Yyy = (12.0*y - 12.0)*y + 2.0;
  double Zzz = (12.0*z - 12.0)*z + 2.0;
  *U   = X*Y*Z;
  *Ux  = Xx*Y*Z;
  *Uy  = X*Yy*Z;
  *Uz  = X*Y*Zz;
  *Uxx = Xxx*Y*Z;
  *Uyy = X*Yyy*Z;
  *Uzz = X*Y*Zzz;
}
//------------------------------------------------------------------------------------------------------------------------------
// Initialize this level's coefficient, true-solution, and right-hand-side
// vectors from the analytic functions evaluateBeta() and evaluateU() above:
//   VECTOR_ALPHA       cell-centered alpha (identically 1.0 here)
//   VECTOR_BETA_I/J/K  face-centered beta on the low i/j/k faces of each cell
//   VECTOR_UTRUE       the analytic solution u at cell centers
//   VECTOR_F           the matching RHS  f = a*alpha*u - b*(grad(beta).grad(u) + beta*laplacian(u))
// hLevel is the grid spacing of this level; a and b are the problem scalars.
void initialize_problem(level_type * level, double hLevel, double a, double b){
  level->h = hLevel;
  int box;
  for(box=0;box<level->num_my_boxes;box++){
    // zero the whole box volume (including ghost cells) before filling
    memset(level->my_boxes[box].vectors[VECTOR_ALPHA ],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_BETA_I],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_BETA_J],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_BETA_K],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_UTRUE ],0,level->my_boxes[box].volume*sizeof(double));
    memset(level->my_boxes[box].vectors[VECTOR_F     ],0,level->my_boxes[box].volume*sizeof(double));
    int i,j,k;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int  ghosts = level->my_boxes[box].ghosts;
    // boxes are cubical: all three extents read the same .dim field
    const int   dim_i = level->my_boxes[box].dim;
    const int   dim_j = level->my_boxes[box].dim;
    const int   dim_k = level->my_boxes[box].dim;
    #ifdef _OPENMP
    #pragma omp parallel for private(k,j,i) collapse(3)
    #endif
    // the <= bounds write one extra plane per dimension (the high face); those
    // entries land in the ghost region thanks to the +ghosts offset in ijk
    for(k=0;k<=dim_k;k++){ // include high face
    for(j=0;j<=dim_j;j++){ // include high face
    for(i=0;i<=dim_i;i++){ // include high face
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // FIX... move to quadrature version to initialize the problem.
      // i.e. the value of an array element is the average value of the function over the cell (finite volume)
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
      double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell
      double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );
      double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );
      double A,B,Bx,By,Bz,Bi,Bj,Bk;
      double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      // constant-coefficient defaults (overridden below when variable-coefficient)
      A  = 1.0;
      B  = 1.0;
      Bx = 0.0;
      By = 0.0;
      Bz = 0.0;
      Bi = 1.0;
      Bj = 1.0;
      Bk = 1.0;
      #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
      // only the face-centered values Bi/Bj/Bk are kept from the first three
      // calls; their Bx/By/Bz outputs are overwritten by the final call
      evaluateBeta(x-hLevel*0.5,y           ,z           ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i
      evaluateBeta(x           ,y-hLevel*0.5,z           ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j
      evaluateBeta(x           ,y           ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k
      evaluateBeta(x           ,y           ,z           ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta
      #endif
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );
      // f = a*alpha*u - b*div(beta*grad(u)) expanded via the product rule
      double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
      level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
      level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
      level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
      level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U;
      level->my_boxes[box].vectors[VECTOR_F     ][ijk] = F;
      //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    }}}
  }
  // quick test for Poisson... (alpha==0 everywhere iff alpha.alpha == 0)
  if(level->alpha_is_zero==-1)level->alpha_is_zero = (dot(level,VECTOR_ALPHA,VECTOR_ALPHA) == 0.0);
}
//------------------------------------------------------------------------------------------------------------------------------
|
munit.c | /* Copyright (c) 2013-2018 Evan Nemerson <evan@nemerson.com>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*** Configuration ***/
/* This is just where the output from the test goes. It's really just
 * meant to let you choose stdout or stderr, but if anyone really wants
 * to direct it to a file, let me know; it would be fairly easy to
 * support. */
#if !defined(MUNIT_OUTPUT_FILE)
# define MUNIT_OUTPUT_FILE stdout
#endif
/* This is a bit more useful; it tells µnit how to format the seconds in
* timed tests. If your tests run for longer you might want to reduce
* it, and if your computer is really fast and your tests are tiny you
* can increase it. */
#if !defined(MUNIT_TEST_TIME_FORMAT)
# define MUNIT_TEST_TIME_FORMAT "0.8f"
#endif
/* If you have long test names you might want to consider bumping
* this. The result information takes 43 characters. */
#if !defined(MUNIT_TEST_NAME_LEN)
# define MUNIT_TEST_NAME_LEN 37
#endif
/* If you don't like the timing information, you can disable it by
* defining MUNIT_DISABLE_TIMING. */
#if !defined(MUNIT_DISABLE_TIMING)
# define MUNIT_ENABLE_TIMING
#endif
/*** End configuration ***/
#if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L)
# undef _POSIX_C_SOURCE
#endif
#if !defined(_POSIX_C_SOURCE)
# define _POSIX_C_SOURCE 200809L
#endif
/* Solaris freaks out if you try to use a POSIX or SUS standard without
* the "right" C standard. */
#if defined(_XOPEN_SOURCE)
# undef _XOPEN_SOURCE
#endif
#if defined(__STDC_VERSION__)
# if __STDC_VERSION__ >= 201112L
# define _XOPEN_SOURCE 700
# elif __STDC_VERSION__ >= 199901L
# define _XOPEN_SOURCE 600
# endif
#endif
/* Because, according to Microsoft, POSIX is deprecated. You've got
* to appreciate the chutzpah. */
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
# define _CRT_NONSTDC_NO_DEPRECATE
#endif
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif
#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>
#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif
#if !defined(_WIN32)
# include <unistd.h>
# include <sys/types.h>
# include <sys/wait.h>
#else
# include <windows.h>
# include <io.h>
# include <fcntl.h>
# if !defined(STDERR_FILENO)
# define STDERR_FILENO _fileno(stderr)
# endif
#endif
#include "munit.h"
#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)
#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
# define MUNIT_THREAD_LOCAL __thread
#elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local)
# define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(_WIN32)
# define MUNIT_THREAD_LOCAL __declspec(thread)
#endif
/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
* while (0)', or 'do { ... } while (1)'. I'm pretty sure nobody
* at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif
#if defined(_WIN32) || defined(__EMSCRIPTEN__)
# define MUNIT_NO_FORK
#endif
#if defined(__EMSCRIPTEN__)
# define MUNIT_NO_BUFFER
#endif
/*** Logging ***/
static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;
#if defined(MUNIT_THREAD_LOCAL)
static MUNIT_THREAD_LOCAL munit_bool munit_error_jmp_buf_valid = 0;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif
/* At certain warning levels, mingw will trigger warnings about
 * suggesting the format attribute, which we've explicitly *not* set
 * because it will then choke on our attempts to use the MS-specific
 * I64 modifier for size_t (which we have to use since MSVC doesn't
 * support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif
/* Core log sink: writes "<Level>: [file:line: ]message\n" to fp, dropping
 * anything below the currently visible log level.  An unrecognized level is
 * itself reported as an error through munit_logf_ex() (which may abort). */
MUNIT_PRINTF(5,0)
static void
munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
  const char* level_name;

  if (level < munit_log_level_visible)
    return;

  if (level == MUNIT_LOG_DEBUG) {
    level_name = "Debug";
  } else if (level == MUNIT_LOG_INFO) {
    level_name = "Info";
  } else if (level == MUNIT_LOG_WARNING) {
    level_name = "Warning";
  } else if (level == MUNIT_LOG_ERROR) {
    level_name = "Error";
  } else {
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
    return;
  }

  fputs(level_name, fp);
  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}
/* Variadic convenience wrapper around munit_logf_exv() for internal use;
 * logs with no filename/line attribution. */
MUNIT_PRINTF(3,4)
static void
munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) {
  va_list args;

  va_start(args, format);
  munit_logf_exv(level, fp, NULL, 0, format, args);
  va_end(args);
}
/* Log a pre-formatted message verbatim; the "%s" indirection keeps any
 * '%' characters in `message` from being interpreted as format specifiers. */
static void
munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) {
  munit_logf_internal(level, fp, "%s", message);
}
/* Public logging entry point.  Formats to stderr via munit_logf_exv() and,
 * when `level` reaches the fatal threshold, unwinds the current test: if a
 * thread-local error jmp_buf has been armed we longjmp back into the harness,
 * otherwise the process is abort()ed. */
void
munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, stderr, filename, line, format, ap);
  va_end(ap);

  if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
    /* Jump back into the test runner instead of killing the process. */
    if (munit_error_jmp_buf_valid)
      longjmp(munit_error_jmp_buf, 1);
#endif
    abort();
  }
}
/* Report a test failure: log at MUNIT_LOG_ERROR and unconditionally unwind —
 * longjmp back into the harness when the thread-local jmp_buf is armed,
 * abort() otherwise.  Unlike munit_logf_ex() this never returns normally. */
void
munit_errorf_ex(const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
  va_end(ap);

#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}
#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif
#if !defined(MUNIT_STRERROR_LEN)
# define MUNIT_STRERROR_LEN 80
#endif
/* Log `msg` followed by a textual description of the current errno value.
 * Prefers the reentrant strerror_r (POSIX) / strerror_s (Windows) into a
 * stack buffer; falls back to plain strerror() where neither is available. */
static void
munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
  /* strerror() is not thread-safe, but it's all we have on this platform. */
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
  char munit_error_str[MUNIT_STRERROR_LEN];
  munit_error_str[0] = '\0';

#if !defined(_WIN32)
  strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
  strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif

  munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}
/*** Memory allocation ***/
/* Allocate `size` bytes of zero-initialized memory, logging an error (which,
 * at the default fatal log level, aborts the current test) on failure.
 * A zero-byte request deliberately returns NULL without allocating. */
void*
munit_malloc_ex(const char* filename, int line, size_t size) {
  void* result = NULL;

  if (size != 0) {
    result = calloc(1, size);
    if (MUNIT_UNLIKELY(result == NULL)) {
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
    }
  }

  return result;
}
/*** Timer code ***/
#if defined(MUNIT_ENABLE_TIMING)
#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t
/* Code copied from portable-snippets
* <https://github.com/nemequ/portable-snippets/>. If you need to
* change something, please do it there so we can keep the code in
* sync. */
/* Clocks (v1)
 * Portable Snippets - https://github.com/nemequ/portable-snippets
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the authors have waived all
* copyright and related or neighboring rights to this code. For
* details, see the Creative Commons Zero 1.0 Universal license at
* https://creativecommons.org/publicdomain/zero/1.0/
*/
#if !defined(PSNIP_CLOCK_H)
#define PSNIP_CLOCK_H
#if !defined(psnip_uint64_t)
# include "../exact-int/exact-int.h"
#endif
#if !defined(PSNIP_CLOCK_STATIC_INLINE)
# if defined(__GNUC__)
# define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__))
# else
# define PSNIP_CLOCK__COMPILER_ATTRIBUTES
# endif
# define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static
#endif
enum PsnipClockType {
/* This clock provides the current time, in units since 1970-01-01
* 00:00:00 UTC not including leap seconds. In other words, UNIX
* time. Keep in mind that this clock doesn't account for leap
* seconds, and can go backwards (think NTP adjustments). */
PSNIP_CLOCK_TYPE_WALL = 1,
/* The CPU time is a clock which increases only when the current
* process is active (i.e., it doesn't increment while blocking on
* I/O). */
PSNIP_CLOCK_TYPE_CPU = 2,
/* Monotonic time is always running (unlike CPU time), but it only
ever moves forward unless you reboot the system. Things like NTP
adjustments have no effect on this clock. */
PSNIP_CLOCK_TYPE_MONOTONIC = 3
};
struct PsnipClockTimespec {
psnip_uint64_t seconds;
psnip_uint64_t nanoseconds;
};
/* Methods we support: */
#define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1
#define PSNIP_CLOCK_METHOD_TIME 2
#define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3
#define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4
#define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5
#define PSNIP_CLOCK_METHOD_CLOCK 6
#define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7
#define PSNIP_CLOCK_METHOD_GETRUSAGE 8
#define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9
#define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10
#include <assert.h>
#if defined(HEDLEY_UNREACHABLE)
# define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE()
#else
# define PSNIP_CLOCK_UNREACHABLE() assert(0)
#endif
/* Choose an implementation */
/* #undef PSNIP_CLOCK_WALL_METHOD */
/* #undef PSNIP_CLOCK_CPU_METHOD */
/* #undef PSNIP_CLOCK_MONOTONIC_METHOD */
/* We want to be able to detect the libc implementation, so we include
<limits.h> (<features.h> isn't available everywhere). */
#if defined(__unix__) || defined(__unix) || defined(__linux__)
# include <limits.h>
# include <unistd.h>
#endif
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)
/* These are known to work without librt. If you know of others
* please let us know so we can add them. */
# if \
(defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \
(defined(__FreeBSD__))
# define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# elif !defined(PSNIP_CLOCK_NO_LIBRT)
# define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# endif
#endif
#if defined(_WIN32)
# if !defined(PSNIP_CLOCK_CPU_METHOD)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
# endif
#endif
#if defined(__MACH__) && !defined(__gnu_hurd__)
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
# endif
#endif
#if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME)
# include <time.h>
# if !defined(PSNIP_CLOCK_WALL_METHOD)
# if defined(CLOCK_REALTIME_PRECISE)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE
# elif !defined(__sun)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME
# endif
# endif
# if !defined(PSNIP_CLOCK_CPU_METHOD)
# if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID
# elif defined(CLOCK_VIRTUAL)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL
# endif
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# if defined(CLOCK_MONOTONIC_RAW)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
# elif defined(CLOCK_MONOTONIC_PRECISE)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE
# elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
# endif
# endif
#endif
#if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L)
# if !defined(PSNIP_CLOCK_WALL_METHOD)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY
# endif
#endif
#if !defined(PSNIP_CLOCK_WALL_METHOD)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME
#endif
#if !defined(PSNIP_CLOCK_CPU_METHOD)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK
#endif
/* Primarily here for testing. */
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC)
# error No monotonic clock found.
#endif
/* Implementations */
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME))
# include <time.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY))
# include <sys/time.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64))
# include <windows.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE))
# include <sys/time.h>
# include <sys/resource.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME))
# include <CoreServices/CoreServices.h>
# include <mach/mach.h>
# include <mach/mach_time.h>
#endif
/*** Implementations ***/
#define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL))
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME))
/* Query the resolution of clock `clk_id` and convert it to ticks per second
 * (e.g. 1000000000 for a 1 ns resolution clock).  Returns 0 if the
 * resolution cannot be determined.
 * FIX: the original divided by res.tv_nsec alone; a coarse clock reporting
 * its resolution as { tv_sec = 1, tv_nsec = 0 } caused an integer division
 * by zero (undefined behavior).  We now compute the total resolution in
 * nanoseconds and guard the zero case. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock__clock_getres (clockid_t clk_id) {
  struct timespec res;
  psnip_uint64_t res_ns;
  int r;

  r = clock_getres(clk_id, &res);
  if (r != 0)
    return 0;

  res_ns = ((psnip_uint64_t) res.tv_sec) * PSNIP_CLOCK_NSEC_PER_SEC + (psnip_uint64_t) res.tv_nsec;
  if (res_ns == 0)
    return 0; /* resolution not reported; treat as unknown */
  if (res_ns > PSNIP_CLOCK_NSEC_PER_SEC)
    return 1; /* coarser than one second: at most one tick per second */

  return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res_ns);
}
/* Read clock `clk_id` into `res` (seconds + nanoseconds).  Returns 0 on
 * success, -10 if clock_gettime() fails. */
PSNIP_CLOCK__FUNCTION int
psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) {
  struct timespec now;

  if (clock_gettime(clk_id, &now) != 0)
    return -10;

  res->seconds     = (psnip_uint64_t) now.tv_sec;
  res->nanoseconds = (psnip_uint64_t) now.tv_nsec;

  return 0;
}
#endif
/* Return the precision of the wall clock in ticks per second, or 0 when no
 * wall-clock method is configured.  The active branch is selected at compile
 * time by PSNIP_CLOCK_WALL_METHOD. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_wall_get_precision (void) {
#if !defined(PSNIP_CLOCK_WALL_METHOD)
  return 0;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL);
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY
  /* gettimeofday() reports microseconds. */
  return 1000000;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME
  /* time() reports whole seconds. */
  return 1;
#else
  return 0;
#endif
}
/* Read the wall clock into `res`.  Returns 0 on success or a negative error
 * code (-2: no method configured, -6: gettimeofday failed, or the result of
 * psnip_clock__clock_gettime).  The time()/gettimeofday() branches fall
 * through to the final `return 0`. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_wall_get_time (struct PsnipClockTimespec* res) {
  (void) res; /* unused in the !PSNIP_CLOCK_WALL_METHOD branch */

#if !defined(PSNIP_CLOCK_WALL_METHOD)
  return -2;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res);
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME
  res->seconds = time(NULL);
  res->nanoseconds = 0;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY
  struct timeval tv;

  if (gettimeofday(&tv, NULL) != 0)
    return -6;

  res->seconds = tv.tv_sec;
  res->nanoseconds = tv.tv_usec * 1000;
#else
  return -2;
#endif

  return 0;
}
/* Precision (ticks per second) of the per-process CPU clock, or 0 when
 * no CPU-clock method was detected at compile time. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_cpu_get_precision (void) {
#if !defined(PSNIP_CLOCK_CPU_METHOD)
return 0;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK
return CLOCKS_PER_SEC;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES
/* GetProcessTimes reports in 100-nanosecond units. */
return PSNIP_CLOCK_NSEC_PER_SEC / 100;
#else
return 0;
#endif
}
/* Read the per-process CPU-time clock into *res.  Returns 0 on success
 * or a negative error code identifying the failing backend. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) {
#if !defined(PSNIP_CLOCK_CPU_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK
  clock_t t = clock();
  if (t == ((clock_t) -1))
    return -5;
  res->seconds = t / CLOCKS_PER_SEC;
  res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES
  FILETIME CreationTime, ExitTime, KernelTime, UserTime;
  LARGE_INTEGER date, adjust;
  if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime))
    return -7;
  /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */
  date.HighPart = UserTime.dwHighDateTime;
  date.LowPart = UserTime.dwLowDateTime;
  adjust.QuadPart = 11644473600000 * 10000;
  date.QuadPart -= adjust.QuadPart;
  res->seconds = date.QuadPart / 10000000;
  res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100);
#elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != 0)
    return -8;
  res->seconds = usage.ru_utime.tv_sec;
  /* BUG FIX: this branch previously read `tv.tv_usec`, an identifier
   * that is not declared here (it belongs to the gettimeofday wall-clock
   * branch of a different function).  The microseconds must come from
   * the rusage user-time field, scaled to nanoseconds. */
  res->nanoseconds = usage.ru_utime.tv_usec * 1000;
#else
  (void) res;
  return -2;
#endif
  return 0;
}
/* Precision (ticks per second) of the monotonic clock, or 0 when no
 * monotonic method was detected at compile time. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_monotonic_get_precision (void) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
return 0;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
/* NOTE(review): the timebase cache is initialized lazily with no
 * synchronization -- presumably benign if raced (same values are
 * written); confirm if called from multiple threads. */
static mach_timebase_info_data_t tbi = { 0, };
if (tbi.denom == 0)
mach_timebase_info(&tbi);
return (psnip_uint32_t) (tbi.numer / tbi.denom);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
/* GetTickCount64 has millisecond granularity. */
return 1000;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
LARGE_INTEGER Frequency;
QueryPerformanceFrequency(&Frequency);
/* Clamp: a frequency above 1 GHz cannot be represented more finely
 * than nanoseconds. */
return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart);
#else
return 0;
#endif
}
/* Read the monotonic clock into *res.  Returns 0 on success or a
 * negative error code identifying the failing backend. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
  psnip_uint64_t nsec = mach_absolute_time();
  static mach_timebase_info_data_t tbi = { 0, };
  if (tbi.denom == 0)
    mach_timebase_info(&tbi);
  /* NOTE(review): integer division numer/denom loses precision for
   * non-integral timebases; kept as-is for output compatibility. */
  nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom);
  res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC;
  res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER t, f;
  if (QueryPerformanceCounter(&t) == 0)
    return -12;
  QueryPerformanceFrequency(&f);
  res->seconds = t.QuadPart / f.QuadPart;
  res->nanoseconds = t.QuadPart % f.QuadPart;
  if (f.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC)
    res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC;
  else
    res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
  const ULONGLONG msec = GetTickCount64();
  res->seconds = msec / 1000;
  /* BUG FIX: previously `sec % 1000` -- `sec` is not declared anywhere
   * in this function.  The leftover milliseconds come from msec and
   * must be scaled to nanoseconds. */
  res->nanoseconds = (msec % 1000) * (PSNIP_CLOCK_NSEC_PER_SEC / 1000);
#else
  (void) res;
  return -2;
#endif
  return 0;
}
/* Returns the number of ticks per second for the specified clock.
* For example, a clock with millisecond precision would return 1000,
* and a clock with 1 second (such as the time() function) would
* return 1.
*
* If the requested clock isn't available, it will return 0.
* Hopefully this will be rare, but if it happens to you please let us
* know so we can work on finding a way to support your system.
*
* Note that different clocks on the same system often have
* different precisions.
*/
/* Dispatch to the per-clock precision query for the requested clock
 * type.  Returns ticks per second, or 0 for an unsupported clock. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_get_precision (enum PsnipClockType clock_type) {
  if (clock_type == PSNIP_CLOCK_TYPE_MONOTONIC)
    return psnip_clock_monotonic_get_precision ();
  if (clock_type == PSNIP_CLOCK_TYPE_CPU)
    return psnip_clock_cpu_get_precision ();
  if (clock_type == PSNIP_CLOCK_TYPE_WALL)
    return psnip_clock_wall_get_precision ();

  PSNIP_CLOCK_UNREACHABLE();
  return 0;
}
/* Set the provided timespec to the requested time.  Returns 0 on
 * success, or a negative value on failure (including -1 for an
 * unrecognized clock type). */
PSNIP_CLOCK__FUNCTION int
psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) {
  assert(res != NULL);

  if (clock_type == PSNIP_CLOCK_TYPE_MONOTONIC)
    return psnip_clock_monotonic_get_time (res);
  if (clock_type == PSNIP_CLOCK_TYPE_CPU)
    return psnip_clock_cpu_get_time (res);
  if (clock_type == PSNIP_CLOCK_TYPE_WALL)
    return psnip_clock_wall_get_time (res);

  return -1;
}
#endif /* !defined(PSNIP_CLOCK_H) */
/* Nanoseconds elapsed between two timespecs (end >= start assumed);
 * the nanosecond fields are reconciled so no precision is lost. */
static psnip_uint64_t
munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) {
  psnip_uint64_t elapsed = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;

  if (end->nanoseconds >= start->nanoseconds)
    elapsed += end->nanoseconds - start->nanoseconds;
  else
    elapsed -= start->nanoseconds - end->nanoseconds;

  return elapsed;
}
#else
# include <time.h>
#endif /* defined(MUNIT_ENABLE_TIMING) */
/*** PRNG stuff ***/
/* This is (unless I screwed up, which is entirely possible) the
* version of PCG with 32-bit state. It was chosen because it has a
* small enough state that we should reliably be able to use CAS
* instead of requiring a lock for thread-safety.
*
* If I did screw up, I probably will not bother changing it unless
* there is a significant bias. It's really not important this be
* particularly strong, as long as it is fairly random it's much more
* important that it be reproducible, so bug reports have a better
* chance of being reproducible. */
/* Select an atomic backend for the shared PRNG state: prefer C11
 * <stdatomic.h>, then Clang's C11 builtins, with OpenMP critical
 * sections, Win32 Interlocked*, or plain volatile as fallbacks. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8))
# define HAVE_STDATOMIC
#elif defined(__clang__)
# if __has_extension(c_atomic)
# define HAVE_CLANG_ATOMICS
# endif
#endif
/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
# undef HAVE_STDATOMIC
# if defined(__c2__)
# undef HAVE_CLANG_ATOMICS
# endif
#endif
#if defined(_OPENMP)
# define ATOMIC_UINT32_T uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(HAVE_STDATOMIC)
# include <stdatomic.h>
# define ATOMIC_UINT32_T _Atomic uint32_t
/* NOTE(review): ATOMIC_VAR_INIT is deprecated in C17 and removed in
 * C23; a plain initializer would suffice on modern compilers. */
# define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x)
#elif defined(HAVE_CLANG_ATOMICS)
# define ATOMIC_UINT32_T _Atomic uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(_WIN32)
# define ATOMIC_UINT32_T volatile LONG
# define ATOMIC_UINT32_INIT(x) (x)
#else
# define ATOMIC_UINT32_T volatile uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#endif
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);
#if defined(_OPENMP)
/* OpenMP backend: store under the named critical section shared by all
 * three munit_atomic_* operations. */
static inline void
munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) {
#pragma omp critical (munit_atomics)
*dest = value;
}
/* OpenMP backend: load under the shared critical section.
 * BUG FIX: the temporary was declared `int`, so values above INT_MAX
 * went through an implementation-defined signed conversion before
 * being returned as uint32_t; use the correct unsigned type. */
static inline uint32_t
munit_atomic_load(ATOMIC_UINT32_T* src) {
  uint32_t ret;
#pragma omp critical (munit_atomics)
  ret = *src;
  return ret;
}
/* OpenMP backend: compare-and-swap under the shared critical section.
 * Unlike C11 atomic_compare_exchange, *expected is NOT rewritten on
 * failure; callers re-load the state themselves before retrying. */
static inline uint32_t
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
munit_bool ret;
#pragma omp critical (munit_atomics)
{
if (*dest == *expected) {
*dest = desired;
ret = 1;
} else {
ret = 0;
}
}
return ret;
}
#elif defined(HAVE_STDATOMIC)
# define munit_atomic_store(dest, value) atomic_store(dest, value)
# define munit_atomic_load(src) atomic_load(src)
# define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value)
#elif defined(HAVE_CLANG_ATOMICS)
# define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST)
# define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST)
# define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
/* BUG FIX: the GCC version test below was missing parentheses, so the
 * "||" escaped the "defined(__GNUC__) &&" guard.  Benign in practice
 * (an undefined __GNUC__ evaluates to 0 in #elif), but wrong as
 * written; the intent is "GCC >= 4.7". */
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
# define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST)
# define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST)
# define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ >= 4)
# define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
# define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value)
#elif defined(_WIN32) /* Untested */
# define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
# define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected))
#else
# warning No atomic implementation, PRNG will not be thread-safe
# define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
/* Last-resort fallback: a plain, non-atomic compare-and-swap. */
static inline munit_bool
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  if (*dest == *expected) {
    *dest = desired;
    return 1;
  } else {
    return 0;
  }
}
#endif
#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)

/* Advance the PCG-style 32-bit LCG state by one step. */
static munit_uint32_t
munit_rand_next_state(munit_uint32_t state) {
  const munit_uint32_t advanced =
    (state * MUNIT_PRNG_MULTIPLIER) + MUNIT_PRNG_INCREMENT;
  return advanced;
}
/* Output permutation: scramble the raw LCG state into the value that
 * is actually handed to callers (variable xorshift, multiply, xorshift). */
static munit_uint32_t
munit_rand_from_state(munit_uint32_t state) {
  const munit_uint32_t shift = (state >> 28) + 4;
  munit_uint32_t out = ((state >> shift) ^ state) * (277803737U);
  out ^= out >> 22;
  return out;
}
/* Seed the global PRNG.  The seed is advanced once through the
 * generator before being stored, so similar seeds diverge immediately. */
void
munit_rand_seed(munit_uint32_t seed) {
  const munit_uint32_t initial = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  munit_atomic_store(&munit_rand_state, initial);
}
/* Derive a fresh PRNG seed from the wall clock: nanoseconds when timing
 * support is compiled in, otherwise time(NULL). */
static munit_uint32_t
munit_rand_generate_seed(void) {
munit_uint32_t seed, state;
#if defined(MUNIT_ENABLE_TIMING)
struct PsnipClockTimespec wc = { 0, };
psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
seed = (munit_uint32_t) wc.nanoseconds;
#else
seed = (munit_uint32_t) time(NULL);
#endif
/* Mix the raw clock value through one generator step so poor clock
 * entropy still produces well-distributed seeds. */
state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
return munit_rand_from_state(state);
}
/* Draw one 32-bit value from an explicit (non-shared) state, advancing
 * the state in place.  The output is derived from the PRE-advance state. */
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t* state) {
  const munit_uint32_t current = *state;
  *state = munit_rand_next_state(current);
  return munit_rand_from_state(current);
}
/* Thread-safe draw from the global PRNG: advance the shared state via a
 * CAS retry loop, then hash the value it held before the advance. */
munit_uint32_t
munit_rand_uint32(void) {
munit_uint32_t old, state;
do {
old = munit_atomic_load(&munit_rand_state);
state = munit_rand_next_state(old);
} while (!munit_atomic_cas(&munit_rand_state, &old, state));
return munit_rand_from_state(old);
}
/* Fill `data` with `size` random bytes drawn from an explicit state:
 * whole 32-bit words first, then one final draw for any leftover bytes. */
static void
munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  munit_uint8_t* out = data;
  size_t whole_words = size / sizeof(munit_uint32_t);
  const size_t tail_bytes = size % sizeof(munit_uint32_t);
  munit_uint32_t chunk;

  for ( ; whole_words > 0 ; whole_words--) {
    chunk = munit_rand_state_uint32(state);
    memcpy(out, &chunk, sizeof(chunk));
    out += sizeof(chunk);
  }

  if (tail_bytes != 0) {
    chunk = munit_rand_state_uint32(state);
    memcpy(out, &chunk, tail_bytes);
  }
}
/* Thread-safe random fill of `data` from the global PRNG.  On CAS
 * failure the whole buffer is simply regenerated, so callers never see
 * a partially-raced fill. */
void
munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
munit_uint32_t old, state;
do {
state = old = munit_atomic_load(&munit_rand_state);
munit_rand_state_memory(&state, size, data);
} while (!munit_atomic_cas(&munit_rand_state, &old, state));
}
/* Draw a value in [0, max] (inclusive) from an explicit state, using
 * rejection sampling to reduce modulo bias.  `salt` perturbs the
 * sequence without affecting reproducibility. */
static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t min, x;

  /* BUG FIX: max == 0 previously fed "% 0" below (undefined behavior).
   * Only one value (0) is representable, so return it directly.  This
   * is reachable via --single with a one-value parameter enum. */
  if (max == 0)
    return 0;

  if (max == (~((munit_uint32_t) 0U)))
    return munit_rand_state_uint32(state) ^ salt;

  /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same
   * as (UINT32_MAX + 1 - max) % max = -max % max.  We compute -max using not
   * to avoid compiler warnings.
   */
  min = (~max + 1U) % max;

  max++;
  do {
    x = munit_rand_state_uint32(state) ^ salt;
  } while (x < min);
  return x % max;
}
/* Thread-safe wrapper: draw a value in [0, max] from the global PRNG,
 * committing the advanced state with a CAS retry loop. */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t observed, advanced, result;

  do {
    advanced = observed = munit_atomic_load(&munit_rand_state);
    result = munit_rand_state_at_most(&advanced, salt, max);
  } while (!munit_atomic_cas(&munit_rand_state, &observed, advanced));

  return result;
}
int
munit_rand_int_range(int min, int max) {
munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;
if (min > max)
return munit_rand_int_range(max, min);
if (range > (~((munit_uint32_t) 0U)))
range = (~((munit_uint32_t) 0U));
return min + munit_rand_at_most(0, (munit_uint32_t) range);
}
/* Uniform double in [0, 1), derived from a single 32-bit draw from the
 * global PRNG (thread-safe via the CAS retry loop). */
double
munit_rand_double(void) {
munit_uint32_t old, state;
double retval = 0.0;
do {
state = old = munit_atomic_load(&munit_rand_state);
/* See http://mumble.net/~campbell/tmp/random_real.c for how to do
 * this right. Patches welcome if you feel that this is too
 * biased. */
retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0);
} while (!munit_atomic_cas(&munit_rand_state, &old, state));
return retval;
}
/*** Test suite handling ***/
/* Aggregated pass/fail counters for one test or one whole run. */
typedef struct {
unsigned int successful;
unsigned int skipped;
unsigned int failed;
unsigned int errored;
#if defined(MUNIT_ENABLE_TIMING)
munit_uint64_t cpu_clock; /* summed CPU time of successful iterations, ns */
munit_uint64_t wall_clock; /* summed wall time of successful iterations, ns */
#endif
} MunitReport;
/* Everything needed to execute a suite: CLI configuration plus the
 * accumulated report. */
typedef struct {
const char* prefix; /* name prefix inherited from parent suites */
const MunitSuite* suite; /* root suite being run */
const char** tests; /* NULL, or CLI-selected test names */
munit_uint32_t seed; /* PRNG seed, re-applied per test */
unsigned int iterations; /* 0 = use the suite's default */
MunitParameter* parameters; /* name/value pairs supplied via --param */
munit_bool single_parameter_mode; /* --single: one random combination */
void* user_data;
MunitReport report;
munit_bool colorize;
munit_bool fork; /* run each test in a child process */
munit_bool show_stderr; /* replay buffered stderr even on success */
munit_bool fatal_failures; /* stop at the first failure */
} MunitTestRunner;
/* Look up `key` in a NULL-terminated parameter array; returns the
 * associated value, or NULL if the key is absent (or params is NULL). */
const char*
munit_parameters_get(const MunitParameter params[], const char* key) {
  const MunitParameter* p = params;

  while (p != NULL && p->name != NULL) {
    if (strcmp(p->name, key) == 0)
      return p->value;
    p++;
  }

  return NULL;
}
#if defined(MUNIT_ENABLE_TIMING)
/* Print a nanosecond count as (fractional) seconds using the
 * configured time format. */
static void
munit_print_time(FILE* fp, munit_uint64_t nanoseconds) {
fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC));
}
#endif
/* Add a parameter to an array of parameters. */
/* Append a name/value pair to a growable, NULL-terminated parameter
 * array.  *params_size counts real entries (the terminator is extra).
 * Returns MUNIT_OK, or MUNIT_ERROR if memory could not be grown --
 * in which case the existing array is left intact. */
static MunitResult
munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) {
  /* +2: one slot for the new entry, one for the NULL terminator. */
  MunitParameter* grown = realloc(*params, sizeof(MunitParameter) * (*params_size + 2));

  /* BUG FIX: the original wrote the realloc result straight into
   * *params, so a failed realloc clobbered the caller's pointer with
   * NULL and leaked the old array.  Keep the old array valid so the
   * caller's cleanup path can still free it. */
  if (grown == NULL)
    return MUNIT_ERROR;

  grown[*params_size].name = name;
  grown[*params_size].value = value;
  (*params_size)++;
  grown[*params_size].name = NULL;
  grown[*params_size].value = NULL;

  *params = grown;
  return MUNIT_OK;
}
/* Concatenate two strings, but just return one of the components
* unaltered if the other is NULL or "". */
/* Concatenate two strings, but just return one of the components
 * unaltered if the other is NULL or "".  When both are non-empty a new
 * string is allocated; otherwise the returned pointer aliases an input
 * (which is why munit_maybe_free_concat exists).  If len is non-NULL it
 * receives the result length. */
static char*
munit_maybe_concat(size_t* len, char* prefix, char* suffix) {
  char* res;
  size_t res_l;
  const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
  const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0;

  if (prefix_l == 0 && suffix_l == 0) {
    res = NULL;
    res_l = 0;
  } else if (prefix_l == 0 && suffix_l != 0) {
    res = suffix;
    res_l = suffix_l;
  } else if (prefix_l != 0 && suffix_l == 0) {
    res = prefix;
    res_l = prefix_l;
  } else {
    res_l = prefix_l + suffix_l;
    res = malloc(res_l + 1);
    /* BUG FIX: malloc was unchecked, so an allocation failure wrote
     * through a NULL pointer.  Report failure as the empty result
     * (NULL, length 0) instead. */
    if (res == NULL) {
      res_l = 0;
    } else {
      memcpy(res, prefix, prefix_l);
      memcpy(res + prefix_l, suffix, suffix_l);
      res[res_l] = 0;
    }
  }

  if (len != NULL)
    *len = res_l;

  return res;
}
/* Possibly free a string returned by munit_maybe_concat. */
/* Free `s` only when munit_maybe_concat actually allocated it -- i.e.
 * when it is not simply an alias of prefix or suffix.  free(NULL) is a
 * no-op, so a NULL result is fine too. */
static void
munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) {
  if (s == prefix)
    return;
  if (s == suffix)
    return;
  free(s);
}
/* Cheap string hash function, just used to salt the PRNG. */
/* djb2-style string hash (h*33 + c), used only to salt the PRNG so
 * different test names pick different parameter indices. */
static munit_uint32_t
munit_str_hash(const char* name) {
  munit_uint32_t h = 5381U;
  const char* cursor = name;

  while (*cursor != '\0') {
    h = (h << 5) + h + *cursor;
    cursor++;
  }

  return h;
}
/* Copy everything readable from fd `from` to fd `to`, 1 KiB at a time.
 * Stops at EOF or on a read error; a short/failed write abandons the
 * current chunk but keeps reading (matching the original behavior). */
static void
munit_splice(int from, int to) {
  munit_uint8_t buf[1024];
#if !defined(_WIN32)
  ssize_t len;
  ssize_t written;
  ssize_t w;
#else
  int len;
  int written;
  int w;
#endif

  while ((len = read(from, buf, sizeof(buf))) > 0) {
    written = 0;
    do {
      w = write(to, buf + written, len - written);
      if (w < 0)
        break;
      written += w;
    } while (written < len);
  }
}
/* This is the part that should be handled in the child process */
/* Run one test -- possibly many iterations -- in the current process,
 * accumulating pass/skip/fail/error counts (and, with timing enabled,
 * the wall/CPU time of successful iterations) into *report.  Returns
 * the result of the last iteration executed. */
static MunitResult
munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) {
unsigned int iterations = runner->iterations;
MunitResult result = MUNIT_FAIL;
#if defined(MUNIT_ENABLE_TIMING)
struct PsnipClockTimespec wall_clock_begin = { 0, }, wall_clock_end = { 0, };
struct PsnipClockTimespec cpu_clock_begin = { 0, }, cpu_clock_end = { 0, };
#endif
unsigned int i = 0;
/* Single-iteration tests ignore the CLI/suite iteration count. */
if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION)
iterations = 1;
else if (iterations == 0)
iterations = runner->suite->iterations;
/* Re-seed before each test so the PRNG sequence is reproducible. */
munit_rand_seed(runner->seed);
do {
void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data);
/* Timing brackets only the test body, not setup/teardown. */
#if defined(MUNIT_ENABLE_TIMING)
psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin);
psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin);
#endif
result = test->test(params, data);
#if defined(MUNIT_ENABLE_TIMING)
psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end);
psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end);
#endif
if (test->tear_down != NULL)
test->tear_down(data);
if (MUNIT_LIKELY(result == MUNIT_OK)) {
report->successful++;
#if defined(MUNIT_ENABLE_TIMING)
/* Only successful iterations contribute to the timing totals. */
report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end);
report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end);
#endif
} else {
switch ((int) result) {
case MUNIT_SKIP:
report->skipped++;
break;
case MUNIT_FAIL:
report->failed++;
break;
case MUNIT_ERROR:
report->errored++;
break;
default:
break;
}
/* Any non-OK result ends the iteration loop early. */
break;
}
} while (++i < iterations);
return result;
}
/* Per-result labels printed in the result column: emoticons when
 * MUNIT_EMOTICON is defined, fixed-width text otherwise. */
#if defined(MUNIT_EMOTICON)
# define MUNIT_RESULT_STRING_OK ":)"
# define MUNIT_RESULT_STRING_SKIP ":|"
# define MUNIT_RESULT_STRING_FAIL ":("
# define MUNIT_RESULT_STRING_ERROR ":o"
# define MUNIT_RESULT_STRING_TODO ":/"
#else
# define MUNIT_RESULT_STRING_OK "OK "
# define MUNIT_RESULT_STRING_SKIP "SKIP "
# define MUNIT_RESULT_STRING_FAIL "FAIL "
# define MUNIT_RESULT_STRING_ERROR "ERROR"
# define MUNIT_RESULT_STRING_TODO "TODO "
#endif
/* Write `string` to the output file, wrapped in an ANSI foreground
 * color escape when colorized output is enabled ('1'=red, '2'=green,
 * '3'=yellow). */
static void
munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) {
  if (!runner->colorize) {
    fputs(string, MUNIT_OUTPUT_FILE);
    return;
  }
  fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string);
}
#if !defined(MUNIT_NO_BUFFER)
/* Redirect STDERR into the given buffer file.  Returns a dup of the
 * original stderr fd so munit_restore_stderr can undo the redirect, or
 * -1 when no buffer was supplied.  Exits outright if the buffer has no
 * usable descriptor. */
static int
munit_replace_stderr(FILE* stderr_buf) {
if (stderr_buf != NULL) {
const int orig_stderr = dup(STDERR_FILENO);
int errfd = fileno(stderr_buf);
if (MUNIT_UNLIKELY(errfd == -1)) {
exit(EXIT_FAILURE);
}
dup2(errfd, STDERR_FILENO);
return orig_stderr;
}
return -1;
}
/* Undo munit_replace_stderr: restore the saved descriptor onto
 * STDERR_FILENO and close the duplicate.  A -1 sentinel means there was
 * no redirect to undo. */
static void
munit_restore_stderr(int orig_stderr) {
  if (orig_stderr == -1)
    return;
  dup2(orig_stderr, STDERR_FILENO);
  close(orig_stderr);
}
#endif /* !defined(MUNIT_NO_BUFFER) */
/* Run a test with the specified parameters. */
/* Run one (test, parameter-set) combination and print its result row.
 * With forking enabled the test executes in a child process and the
 * MunitReport is shipped back through a pipe; stderr is redirected to a
 * temp file and replayed only on failure (or with --show-stderr). */
static void
munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) {
MunitResult result = MUNIT_OK;
MunitReport report = {
0, 0, 0, 0,
#if defined(MUNIT_ENABLE_TIMING)
0, 0
#endif
};
unsigned int output_l;
munit_bool first;
const MunitParameter* param;
FILE* stderr_buf;
#if !defined(MUNIT_NO_FORK)
int pipefd[2];
pid_t fork_pid;
int orig_stderr;
ssize_t bytes_written = 0;
ssize_t write_res;
ssize_t bytes_read = 0;
ssize_t read_res;
int status = 0;
pid_t changed_pid;
#endif
/* Print the "name=value, ..." list, padded to the fixed name column. */
if (params != NULL) {
output_l = 2;
fputs(" ", MUNIT_OUTPUT_FILE);
first = 1;
for (param = params ; param != NULL && param->name != NULL ; param++) {
if (!first) {
fputs(", ", MUNIT_OUTPUT_FILE);
output_l += 2;
} else {
first = 0;
}
output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value);
}
while (output_l++ < MUNIT_TEST_NAME_LEN) {
fputc(' ', MUNIT_OUTPUT_FILE);
}
}
fflush(MUNIT_OUTPUT_FILE);
/* Buffer stderr in a temp file so it can be replayed after the row. */
stderr_buf = NULL;
#if !defined(_WIN32) || defined(__MINGW32__)
stderr_buf = tmpfile();
#else
tmpfile_s(&stderr_buf);
#endif
if (stderr_buf == NULL) {
munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr");
result = MUNIT_ERROR;
goto print_result;
}
#if !defined(MUNIT_NO_FORK)
if (runner->fork) {
pipefd[0] = -1;
pipefd[1] = -1;
if (pipe(pipefd) != 0) {
munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe");
result = MUNIT_ERROR;
goto print_result;
}
fork_pid = fork();
if (fork_pid == 0) {
/* Child: run the test, then serialize the raw MunitReport bytes
 * back to the parent over the pipe. */
close(pipefd[0]);
orig_stderr = munit_replace_stderr(stderr_buf);
munit_test_runner_exec(runner, test, params, &report);
/* Note that we don't restore stderr. This is so we can buffer
 * things written to stderr later on (such as by
 * asan/tsan/ubsan, valgrind, etc.) */
close(orig_stderr);
do {
write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written);
if (write_res < 0) {
if (stderr_buf != NULL) {
munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe");
}
exit(EXIT_FAILURE);
}
bytes_written += write_res;
} while ((size_t) bytes_written < sizeof(report));
if (stderr_buf != NULL)
fclose(stderr_buf);
close(pipefd[1]);
exit(EXIT_SUCCESS);
} else if (fork_pid == -1) {
close(pipefd[0]);
close(pipefd[1]);
if (stderr_buf != NULL) {
munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork");
}
report.errored++;
result = MUNIT_ERROR;
} else {
/* Parent: read the child's report, then reap it and inspect how it
 * terminated (clean exit, signal, or stop). */
close(pipefd[1]);
do {
read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read);
if (read_res < 1)
break;
bytes_read += read_res;
} while (bytes_read < (ssize_t) sizeof(report));
changed_pid = waitpid(fork_pid, &status, 0);
if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) {
/* A short read means the child died before finishing the report. */
if (bytes_read != sizeof(report)) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status));
report.errored++;
} else if (WEXITSTATUS(status) != EXIT_SUCCESS) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status));
report.errored++;
}
} else {
if (WIFSIGNALED(status)) {
#if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)
munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status)));
#else
munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status));
#endif
} else if (WIFSTOPPED(status)) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status));
}
report.errored++;
}
close(pipefd[0]);
waitpid(fork_pid, NULL, 0);
}
} else
#endif
{
/* No-fork path: run in-process; MUNIT_THREAD_LOCAL enables a setjmp
 * escape hatch so assertion failures abort only the test. */
#if !defined(MUNIT_NO_BUFFER)
/* volatile: must survive the longjmp back through setjmp below. */
const volatile int orig_stderr = munit_replace_stderr(stderr_buf);
#endif
#if defined(MUNIT_THREAD_LOCAL)
if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) {
result = MUNIT_FAIL;
report.failed++;
} else {
munit_error_jmp_buf_valid = 1;
result = munit_test_runner_exec(runner, test, params, &report);
}
#else
result = munit_test_runner_exec(runner, test, params, &report);
#endif
#if !defined(MUNIT_NO_BUFFER)
munit_restore_stderr(orig_stderr);
#endif
/* Here just so that the label is used on Windows and we don't get
 * a warning */
goto print_result;
}
print_result:
/* Translate the report counters into the printed result label and the
 * runner-level totals.  TODO tests invert success/failure. */
fputs("[ ", MUNIT_OUTPUT_FILE);
if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) {
if (report.failed != 0 || report.errored != 0 || report.skipped != 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3');
result = MUNIT_OK;
} else {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
if (MUNIT_LIKELY(stderr_buf != NULL))
munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful.");
runner->report.failed++;
result = MUNIT_ERROR;
}
} else if (report.failed > 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1');
runner->report.failed++;
result = MUNIT_FAIL;
} else if (report.errored > 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
runner->report.errored++;
result = MUNIT_ERROR;
} else if (report.skipped > 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3');
runner->report.skipped++;
result = MUNIT_SKIP;
} else if (report.successful > 1) {
/* Multiple successful iterations: print per-iteration average plus
 * the totals on a second line. */
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
fputs(" ] [ ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful);
fputs(" / ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful);
fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", "");
munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
fputs(" / ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
runner->report.successful++;
result = MUNIT_OK;
} else if (report.successful > 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
fputs(" ] [ ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
fputs(" / ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
runner->report.successful++;
result = MUNIT_OK;
}
fputs(" ]\n", MUNIT_OUTPUT_FILE);
/* Replay the buffered stderr when the test failed/errored, or always
 * with --show-stderr, then discard the buffer. */
if (stderr_buf != NULL) {
if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) {
fflush(MUNIT_OUTPUT_FILE);
rewind(stderr_buf);
munit_splice(fileno(stderr_buf), STDERR_FILENO);
fflush(stderr);
}
fclose(stderr_buf);
}
}
static void
munit_test_runner_run_test_wild(MunitTestRunner* runner,
const MunitTest* test,
const char* test_name,
MunitParameter* params,
MunitParameter* p) {
const MunitParameterEnum* pe;
char** values;
MunitParameter* next;
for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
if (p->name == pe->name)
break;
}
if (pe == NULL)
return;
for (values = pe->values ; *values != NULL ; values++) {
next = p + 1;
p->value = *values;
if (next->name == NULL) {
munit_test_runner_run_test_with_params(runner, test, params);
} else {
munit_test_runner_run_test_wild(runner, test, test_name, params, next);
}
if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
break;
}
}
/* Run a single test, with every combination of parameters
* requested. */
/* Run a single test with every combination of parameters requested:
 * CLI-pinned values are fixed, remaining enum parameters are either
 * expanded combinatorially or (with --single) sampled once, seeded
 * deterministically from the test name. */
static void
munit_test_runner_run_test(MunitTestRunner* runner,
                           const MunitTest* test,
                           const char* prefix) {
  char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name);
  /* The array of parameters to pass to
   * munit_test_runner_run_test_with_params */
  MunitParameter* params = NULL;
  size_t params_l = 0;
  /* Wildcard parameters are parameters which have possible values
   * specified in the test, but no specific value was passed to the
   * CLI.  That means we want to run the test once for every
   * possible combination of parameter values or, if --single was
   * passed to the CLI, a single time with a random set of
   * parameters. */
  MunitParameter* wild_params = NULL;
  size_t wild_params_l = 0;
  const MunitParameterEnum* pe;
  const MunitParameter* cli_p;
  munit_bool filled;
  unsigned int possible;
  char** vals;
  size_t first_wild;
  const MunitParameter* wp;
  int pidx;

  munit_rand_seed(runner->seed);
  fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name);

  if (test->parameters == NULL) {
    /* No parameters.  Simple, nice. */
    munit_test_runner_run_test_with_params(runner, test, NULL);
  } else {
    fputc('\n', MUNIT_OUTPUT_FILE);
    for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
      /* Did we receive a value for this parameter from the CLI? */
      filled = 0;
      for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) {
        if (strcmp(cli_p->name, pe->name) == 0) {
          /* BUG FIX: "&params_l"/"&params" had been corrupted into the
           * mojibake "¶ms_l"/"¶ms" (an HTML "&para;" entity artifact)
           * here and in two calls below; restored the address-of
           * expressions. */
          if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK))
            goto cleanup;
          filled = 1;
          break;
        }
      }
      if (filled)
        continue;
      /* Nothing from CLI, is the enum NULL/empty?  We're not a
       * fuzzer… */
      if (pe->values == NULL || pe->values[0] == NULL)
        continue;
      /* If --single was passed to the CLI, choose a value from the
       * list of possibilities randomly. */
      if (runner->single_parameter_mode) {
        possible = 0;
        for (vals = pe->values ; *vals != NULL ; vals++)
          possible++;
        /* We want the tests to be reproducible, even if you're only
         * running a single test, but we don't want every test with
         * the same number of parameters to choose the same parameter
         * number, so use the test name as a primitive salt. */
        pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1);
        if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK))
          goto cleanup;
      } else {
        /* We want to try every permutation.  Put in a placeholder
         * entry, we'll iterate through them later. */
        if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK))
          goto cleanup;
      }
    }
    if (wild_params_l != 0) {
      /* Seed each wildcard slot with its first enum value, then let the
       * recursive expansion iterate the rest. */
      first_wild = params_l;
      for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) {
        for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) {
          if (strcmp(wp->name, pe->name) == 0) {
            if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK))
              goto cleanup;
          }
        }
      }
      munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild);
    } else {
      munit_test_runner_run_test_with_params(runner, test, params);
    }
  cleanup:
    free(params);
    free(wild_params);
  }
  munit_maybe_free_concat(test_name, prefix, test->name);
}
/* Recurse through the suite and run all the tests. If a list of
* tests to run was provided on the command line, run only those
* tests. */
/* Recurse through a suite: run each test (filtered by any CLI-selected
 * names, matched against prefix + test name), then descend into child
 * suites.  Honors --fatal-failures at each step. */
static void
munit_test_runner_run_suite(MunitTestRunner* runner,
const MunitSuite* suite,
const char* prefix) {
size_t pre_l;
char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
const MunitTest* test;
const char** test_name;
const MunitSuite* child_suite;
/* Run the tests. */
for (test = suite->tests ; test != NULL && test->test != NULL ; test++) {
if (runner->tests != NULL) { /* Specific tests were requested on the CLI */
for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) {
/* Match: the accumulated prefix, then the requested name as a
 * prefix of the test's own name. */
if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) &&
strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) {
munit_test_runner_run_test(runner, test, pre);
if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
goto cleanup;
}
}
} else { /* Run all tests */
munit_test_runner_run_test(runner, test, pre);
}
}
if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
goto cleanup;
/* Run any child suites. */
for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
munit_test_runner_run_suite(runner, child_suite, pre);
}
cleanup:
munit_maybe_free_concat(pre, prefix, suite->prefix);
}
/* Entry point for a configured runner: execute the top-level suite with
 * no name prefix. */
static void
munit_test_runner_run(MunitTestRunner* runner) {
munit_test_runner_run_suite(runner, runner->suite, NULL);
}
/* Print the --help text: usage line, the built-in options, the munit
 * version banner, then one help entry per user-supplied argument.
 * Fixes two typos in the user-facing text: "hexidecimal" -> "hexadecimal"
 * and "any test with takes" -> "any test which takes". */
static void
munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) {
  const MunitArgument* arg;
  (void) argc;

  printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]);
  puts(" --seed SEED\n"
" Value used to seed the PRNG. Must be a 32-bit integer in decimal\n"
" notation with no separators (commas, decimals, spaces, etc.), or\n"
" hexadecimal prefixed by \"0x\".\n"
" --iterations N\n"
" Run each test N times. 0 means the default number.\n"
" --param name value\n"
" A parameter key/value pair which will be passed to any test which\n"
" takes a parameter of that name. If not provided, the test will be\n"
" run once for each possible parameter value.\n"
" --list Write a list of all available tests.\n"
" --list-params\n"
" Write a list of all available tests and their possible parameters.\n"
" --single Run each parameterized test in a single configuration instead of\n"
" every possible combination\n"
" --log-visible debug|info|warning|error\n"
" --log-fatal debug|info|warning|error\n"
" Set the level at which messages of different severities are visible,\n"
" or cause the test to terminate.\n"
#if !defined(MUNIT_NO_FORK)
" --no-fork Do not execute tests in a child process. If this option is supplied\n"
" and a test crashes (including by failing an assertion), no further\n"
" tests will be performed.\n"
#endif
" --fatal-failures\n"
" Stop executing tests as soon as a failure is found.\n"
" --show-stderr\n"
" Show data written to stderr by the tests, even if the test succeeds.\n"
" --color auto|always|never\n"
" Colorize (or don't) the output.\n"
/* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */
" --help Print this help message and exit.\n");
  /* Prefer the "µ" glyph only when the locale's codeset is UTF-8. */
#if defined(MUNIT_NL_LANGINFO)
  setlocale(LC_ALL, "");
  fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? "µnit" : "munit", stdout);
#else
  puts("munit");
#endif
  /* Version is packed one byte per component: 0x00MMmmpp. */
  printf(" %d.%d.%d\n"
         "Full documentation at: https://nemequ.github.io/munit/\n",
         (MUNIT_CURRENT_VERSION >> 16) & 0xff,
         (MUNIT_CURRENT_VERSION >> 8) & 0xff,
         (MUNIT_CURRENT_VERSION >> 0) & 0xff);
  /* Let each custom argument describe itself. */
  for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++)
    arg->write_help(arg, user_data);
}
/* Look up a user-supplied argument handler by name in the
 * NULL-name-terminated `arguments` array.  Returns NULL when the array
 * is NULL or the name is not found. */
static const MunitArgument*
munit_arguments_find(const MunitArgument arguments[], const char* name) {
  const MunitArgument* cur = arguments;
  while (cur != NULL && cur->name != NULL) {
    if (strcmp(cur->name, name) == 0)
      return cur;
    cur++;
  }
  return NULL;
}
/* Implements --list / --list-params: print every test in `suite` (and,
 * recursively, in its child suites), each name preceded by the
 * accumulated suite prefix.  When `show_params` is set, also print each
 * parameter and its possible values ("Any" when the values array is
 * NULL, i.e. unconstrained). */
static void
munit_suite_list_tests(const MunitSuite* suite, munit_bool show_params, const char* prefix) {
size_t pre_l;
char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
const MunitTest* test;
const MunitParameterEnum* params;
munit_bool first;
char** val;
const MunitSuite* child_suite;
for (test = suite->tests ;
test != NULL && test->name != NULL ;
test++) {
if (pre != NULL)
fputs(pre, stdout);
puts(test->name);
if (show_params) {
for (params = test->parameters ;
params != NULL && params->name != NULL ;
params++) {
fprintf(stdout, " - %s: ", params->name);
if (params->values == NULL) {
puts("Any");
} else {
/* Print the values as a comma-separated list. */
first = 1;
for (val = params->values ;
*val != NULL ;
val++ ) {
if(!first) {
fputs(", ", stdout);
} else {
first = 0;
}
fputs(*val, stdout);
}
putc('\n', stdout);
}
}
}
}
for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
munit_suite_list_tests(child_suite, show_params, pre);
}
/* Free `pre` only if munit_maybe_concat actually allocated it. */
munit_maybe_free_concat(pre, prefix, suite->prefix);
}
/* Best-effort check whether `stream` can render ANSI color escapes.
 * POSIX: any tty is assumed capable.  Windows: a tty counts only when
 * the ANSICON environment variable is set (i.e. the ANSICON wrapper is
 * active); MSVC uses getenv_s, MinGW plain getenv. */
static munit_bool
munit_stream_supports_ansi(FILE *stream) {
#if !defined(_WIN32)
return isatty(fileno(stream));
#else
#if !defined(__MINGW32__)
size_t ansicon_size = 0;
#endif
if (isatty(fileno(stream))) {
#if !defined(__MINGW32__)
/* getenv_s with a NULL buffer just reports the required size;
 * non-zero means the variable exists. */
getenv_s(&ansicon_size, NULL, 0, "ANSICON");
return ansicon_size != 0;
#else
return getenv("ANSICON") != NULL;
#endif
}
return 0;
#endif
}
int
munit_suite_main_custom(const MunitSuite* suite, void* user_data,
int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)],
const MunitArgument arguments[]) {
int result = EXIT_FAILURE;
MunitTestRunner runner;
size_t parameters_size = 0;
size_t tests_size = 0;
int arg;
char* envptr;
unsigned long ts;
char* endptr;
unsigned long long iterations;
MunitLogLevel level;
const MunitArgument* argument;
const char** runner_tests;
unsigned int tests_run;
unsigned int tests_total;
runner.prefix = NULL;
runner.suite = NULL;
runner.tests = NULL;
runner.seed = 0;
runner.iterations = 0;
runner.parameters = NULL;
runner.single_parameter_mode = 0;
runner.user_data = NULL;
runner.report.successful = 0;
runner.report.skipped = 0;
runner.report.failed = 0;
runner.report.errored = 0;
#if defined(MUNIT_ENABLE_TIMING)
runner.report.cpu_clock = 0;
runner.report.wall_clock = 0;
#endif
runner.colorize = 0;
#if !defined(_WIN32)
runner.fork = 1;
#else
runner.fork = 0;
#endif
runner.show_stderr = 0;
runner.fatal_failures = 0;
runner.suite = suite;
runner.user_data = user_data;
runner.seed = munit_rand_generate_seed();
runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
for (arg = 1 ; arg < argc ; arg++) {
if (strncmp("--", argv[arg], 2) == 0) {
if (strcmp("seed", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
envptr = argv[arg + 1];
ts = strtoul(argv[arg + 1], &envptr, 0);
if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
runner.seed = (munit_uint32_t) ts;
arg++;
} else if (strcmp("iterations", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
endptr = argv[arg + 1];
iterations = strtoul(argv[arg + 1], &endptr, 0);
if (*endptr != '\0' || iterations > UINT_MAX) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
runner.iterations = (unsigned int) iterations;
arg++;
} else if (strcmp("param", argv[arg] + 2) == 0) {
if (arg + 2 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]);
goto cleanup;
}
runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2));
if (runner.parameters == NULL) {
munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
goto cleanup;
}
runner.parameters[parameters_size].name = (char*) argv[arg + 1];
runner.parameters[parameters_size].value = (char*) argv[arg + 2];
parameters_size++;
runner.parameters[parameters_size].name = NULL;
runner.parameters[parameters_size].value = NULL;
arg += 2;
} else if (strcmp("color", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
if (strcmp(argv[arg + 1], "always") == 0)
runner.colorize = 1;
else if (strcmp(argv[arg + 1], "never") == 0)
runner.colorize = 0;
else if (strcmp(argv[arg + 1], "auto") == 0)
runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
else {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
arg++;
} else if (strcmp("help", argv[arg] + 2) == 0) {
munit_print_help(argc, argv, user_data, arguments);
result = EXIT_SUCCESS;
goto cleanup;
} else if (strcmp("single", argv[arg] + 2) == 0) {
runner.single_parameter_mode = 1;
} else if (strcmp("show-stderr", argv[arg] + 2) == 0) {
runner.show_stderr = 1;
#if !defined(_WIN32)
} else if (strcmp("no-fork", argv[arg] + 2) == 0) {
runner.fork = 0;
#endif
} else if (strcmp("fatal-failures", argv[arg] + 2) == 0) {
runner.fatal_failures = 1;
} else if (strcmp("log-visible", argv[arg] + 2) == 0 ||
strcmp("log-fatal", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
if (strcmp(argv[arg + 1], "debug") == 0)
level = MUNIT_LOG_DEBUG;
else if (strcmp(argv[arg + 1], "info") == 0)
level = MUNIT_LOG_INFO;
else if (strcmp(argv[arg + 1], "warning") == 0)
level = MUNIT_LOG_WARNING;
else if (strcmp(argv[arg + 1], "error") == 0)
level = MUNIT_LOG_ERROR;
else {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
if (strcmp("log-visible", argv[arg] + 2) == 0)
munit_log_level_visible = level;
else
munit_log_level_fatal = level;
arg++;
} else if (strcmp("list", argv[arg] + 2) == 0) {
munit_suite_list_tests(suite, 0, NULL);
result = EXIT_SUCCESS;
goto cleanup;
} else if (strcmp("list-params", argv[arg] + 2) == 0) {
munit_suite_list_tests(suite, 1, NULL);
result = EXIT_SUCCESS;
goto cleanup;
} else {
argument = munit_arguments_find(arguments, argv[arg] + 2);
if (argument == NULL) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]);
goto cleanup;
}
if (!argument->parse_argument(suite, user_data, &arg, argc, argv))
goto cleanup;
}
} else {
runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2));
if (runner_tests == NULL) {
munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
goto cleanup;
}
runner.tests = runner_tests;
runner.tests[tests_size++] = argv[arg];
runner.tests[tests_size] = NULL;
}
}
fflush(stderr);
fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed);
munit_test_runner_run(&runner);
tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
tests_total = tests_run + runner.report.skipped;
if (tests_run == 0) {
fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
} else {
fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test skipped.\n",
runner.report.successful, tests_run,
(((double) runner.report.successful) / ((double) tests_run)) * 100.0,
runner.report.skipped,
(((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
}
if (runner.report.failed == 0 && runner.report.errored == 0) {
result = EXIT_SUCCESS;
}
cleanup:
free(runner.parameters);
free((void*) runner.tests);
return result;
}
/* Convenience wrapper: run a suite with no custom command-line
 * arguments.  Returns EXIT_SUCCESS / EXIT_FAILURE. */
int
munit_suite_main(const MunitSuite* suite, void* user_data,
int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
return munit_suite_main_custom(suite, user_data, argc, argv, NULL);
} |
GB_unop__identity_uint16_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint16_fc64
// op(A') function: GB_unop_tran__identity_uint16_fc64
// C type: uint16_t
// A type: GxB_FC64_t
// cast: uint16_t cij = GB_cast_to_uint16_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = GB_cast_to_uint16_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY operator entry-wise with typecast:
// Cx [p] = GB_cast_to_uint16_t (creal (Ax [p])), for all anz entries
// (full/sparse case) or only entries flagged in the bitmap Ab.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
// (Auto-generated kernel; comments only, code unchanged.)
GrB_Info GB_unop_apply__identity_uint16_fc64
(
uint16_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz, // number of entries to process
int nthreads // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op, same types: a flat memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose and typecast, implemented by the shared
// template GB_unop_transpose.c using the GB_* macros defined above.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_unop_tran__identity_uint16_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__ceil_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ceil_fp64_fp64)
// op(A') function: GB (_unop_tran__ceil_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = ceil (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ceil (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = ceil (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CEIL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the CEIL operator entry-wise: Cx [k] = ceil (Ax [k]), over all
// anz entries (full/sparse case) or only entries present in the bitmap
// Ab.  Returns GrB_NO_VALUE when this kernel is compiled out via
// GB_DISABLE.
GrB_Info GB (_unop_apply__ceil_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                double t = Ax [k] ;
                Cx [k] = ceil (t) ;
            }
        }
    }
    else
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            double t = Ax [k] ;
            Cx [k] = ceil (t) ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ceil (A'): transpose, typecast, and apply, implemented by the
// shared template GB_unop_transpose.c using the GB_* macros above.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_unop_tran__ceil_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_int16_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int16_fp32
// op(A') function: GB_tran__lnot_int16_fp32
// C type: int16_t
// A type: float
// cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int16_t z ; GB_CAST_SIGNED(z,aij,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply LNOT with typecast, entry-wise: each float Ax [p] is first cast
// to int16_t (via GB_CAST_SIGNED inside GB_CAST_OP), then
// Cx [p] = ! (value != 0).  Returns GrB_NO_VALUE when compiled out via
// GB_DISABLE.  (Auto-generated kernel; comments only, code unchanged.)
GrB_Info GB_unop__lnot_int16_fp32
(
int16_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz, // number of entries to process
int nthreads // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (A'): transpose, typecast, and apply, via the shared template
// GB_unaryop_transpose.c (phase 2) using the macros defined above.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_tran__lnot_int16_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
som.h | #ifndef SYCL_ML_LIB_SOM_H
#define SYCL_ML_LIB_SOM_H

#include <limits>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <random>
#include <omp.h>
#include <atomic>
#include <cassert>
#include "Device.h"

/*
 * Simple competitive-learning ("winner take all") layer.
 *
 * Holds `output_dim` weight vectors of length `feature_dim`, randomly
 * initialised from N(0, 1).  forward() finds the unit whose weights
 * minimise |criterion(datapoint, weights)| and nudges that unit's
 * weights.
 *
 * Fixes vs. the previous revision:
 *  - the include guard was malformed (#endif immediately followed the
 *    #define, leaving everything outside the guard); the header body now
 *    sits inside the guard,
 *  - weight initialisation drew from one shared std::ranlux48 inside an
 *    OpenMP parallel loop (a data race; engines are not thread-safe) —
 *    it is now filled sequentially,
 *  - weights were malloc'd and never freed; std::vector now owns them.
 */
class competitiveLearning{
public:
    // feature_dim: length of each input/weight vector.
    // output_dim:  number of competing units.
    competitiveLearning(int feature_dim, int output_dim){
        this->input_dim = feature_dim;
        this->output_dim = output_dim;
        std::random_device device{};
        std::normal_distribution<float> distribution{0, 1};
        std::ranlux48 generator{device()};
        weights.assign(output_dim, std::vector<float>(feature_dim));
        // Sequential on purpose: a shared RNG engine must not be drawn
        // from concurrently.
        for(int i = 0; i < output_dim; i++){
            for(int j = 0; j < this->input_dim; j++){
                weights[i][j] = distribution(generator);
            }
        }
    }

    /*
     * Present one datapoint: select the unit with the smallest
     * |criterion(datapoint, unit_weights)| and update that unit.
     *
     * criterion: callable taking (float* datapoint, float* weights) and
     * returning a distance-like value (its absolute value is minimised).
     *
     * NOTE(review): the update adds lr*minCriterion (a scalar) to every
     * weight component, exactly as the original code did.  The classical
     * competitive-learning rule is w += lr*(x - w); confirm which is
     * intended before changing behavior.
     */
    template<typename criterion_func>
    void forward(float* datapoint, float lr, criterion_func criterion){
        int minPos = -1;
        float minCriterion = std::numeric_limits<float>::max();
        for(int i = 0; i < this->output_dim; i++){
            auto distance = std::abs(criterion(datapoint, weights[i].data()));
            if(distance < minCriterion){
                minPos = i;
                minCriterion = distance;
            }
        }
        assert(minPos != -1);  // output_dim must be > 0
        #pragma omp parallel for simd
        for(int i = 0; i < this->input_dim; i++)
            weights[minPos][i] = weights[minPos][i] + lr*minCriterion;
    }
private:
    int input_dim;   // length of each weight vector
    int output_dim;  // number of units
    // Owned storage; freed automatically by std::vector's destructor.
    std::vector<std::vector<float>> weights;
};
#endif // SYCL_ML_LIB_SOM_H |
collector-example.c | /* Collector API Usage Example.
*
* © 2017 ETH Zurich,
* [Integrated System Laboratory, D-ITET],
* [Antonio Libri, a.libri@iis.ee.ethz.ch]
*
* © 2017 University of Bologna,
* [Department of Electrical, Electronic and Information Engineering, DEI],
* [Andrea Bartolini, a.bartolini@unibo.it]
*/
#include "collector.h"

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
/*
* Collector macro.
*
* Note. The topics below listen to the pow_pkg and pow_dram metrics
* of all CPU sockets in the compute node.
*
*/
#define TOPIC_POWER_PKG "org/myorg/cluster/testcluster/node/%s/plugin/pmu_pub/chnl/data/cpu/+/pow_pkg"
#define TOPIC_POWER_DRAM "org/myorg/cluster/testcluster/node/%s/plugin/pmu_pub/chnl/data/cpu/+/pow_dram"
#define MQTT_BROKER_IP "127.0.0.1"
#define MQTT_PORT 1883 // Default MQTT port
/*
* Select number of CPU sockets in the compute node.
*/
#define NCPU 2
/* Cleared by the SIGINT handler to break out of the stress loops in
 * main().  NOTE(review): volatile int is the common idiom, but C only
 * guarantees async-signal-safe access for volatile sig_atomic_t —
 * confirm before relying on this on unusual targets. */
static volatile int keepRunning = 1;
/* SIGINT handler: request the busy loops to stop. */
void intHandler(int dummy) { keepRunning = 0;}
/*
 * Example driver: subscribe to per-socket package and DRAM power metrics
 * over MQTT, stress the CPU while the collectors sample, and print the
 * mean power readings (scaled by NCPU sockets).
 *
 * argv[1] (required) is the node name substituted into the MQTT topics.
 *
 * Fixes: argv[1] was used without checking argc (undefined behavior when
 * run with no arguments), the topics were built with unbounded sprintf
 * into 256-byte buffers, and the mallocs were unchecked.
 */
int main(int argc, char *argv[])
{
    volatile int count=0;
    char *brokerIp = MQTT_BROKER_IP;
    int port = MQTT_PORT;

    struct collector_val pow_pkg = { NULL, NULL, false, 0, 0, 0, {0}, {0} };
    struct collector_val pow_dram = { NULL, NULL, false, 0, 0, 0, {0}, {0} };

    /* The node name is mandatory: it is spliced into the topic strings. */
    if(argc < 2){
        fprintf(stderr, "Usage: %s <node-name>\n", argv[0]);
        return 1;
    }

    /*
     * Select MQTT TOPIC
     */
    pow_pkg.mqtt_topic = (char *)malloc(256*sizeof(char));
    pow_dram.mqtt_topic = (char *)malloc(256*sizeof(char));
    if(pow_pkg.mqtt_topic == NULL || pow_dram.mqtt_topic == NULL){
        fprintf(stderr, "[Collector]: Topic allocation error.\n");
        free(pow_pkg.mqtt_topic);
        free(pow_dram.mqtt_topic);
        return 1;
    }
    /* snprintf bounds the write; sprintf could overflow for long names. */
    snprintf(pow_pkg.mqtt_topic, 256, TOPIC_POWER_PKG, argv[1]);
    snprintf(pow_dram.mqtt_topic, 256, TOPIC_POWER_DRAM, argv[1]);

    /*
     * Init collectors and subscribe to the topic.
     */
    if(collector_init(&pow_pkg, brokerIp, port)){
        fprintf(stderr, "[Collector]: Init PowerPkg error.\n");
        return 1;
    }
    if(collector_init(&pow_dram, brokerIp, port)){
        fprintf(stderr, "[Collector]: Init PowerDram error.\n");
        return 1;
    }

    /*
     * Start monitoring the metrics.
     */
    if(collector_start(&pow_pkg)){
        fprintf(stderr, "[Collector]: Start PowerPkg error.\n");
        return 1;
    }
    if(collector_start(&pow_dram)){
        fprintf(stderr, "[Collector]: Start PowerDram error.\n");
        return 1;
    }

    /* Allow Ctrl-C to end each stress phase. */
    signal(SIGINT, intHandler);

    /*
     * Stress all cores.
     */
    #pragma omp parallel
    while(keepRunning)
        count++;

    /*
     * Get mean value and continue the monitoring.
     */
    if(collector_get(&pow_pkg)){
        fprintf(stderr, "[Collector]: Get PowerPkg error.\n");
        return 1;
    }
    if(collector_get(&pow_dram)){
        fprintf(stderr, "[Collector]: Get PowerDram error.\n");
        return 1;
    }
    printf("\nTstart=%ld.%06ld[s], Tend=%ld.%06ld[s], MeanPowerPkg=%f[W]"
           "\nTstart=%ld.%06ld[s], Tend=%ld.%06ld[s], MeanPowerDram=%f[W]\n",
           pow_pkg.start.tv_sec, pow_pkg.start.tv_usec,
           pow_pkg.end.tv_sec, pow_pkg.end.tv_usec, pow_pkg.mean_val*NCPU,
           pow_dram.start.tv_sec, pow_dram.start.tv_usec,
           pow_dram.end.tv_sec, pow_dram.end.tv_usec, pow_dram.mean_val*NCPU);

    /* Re-arm the flag for the second stress phase. */
    keepRunning=1;

    /*
     * Stress all cores.
     */
    #pragma omp parallel
    while(keepRunning)
        count++;

    /*
     * End monitoring and get mean value.
     */
    if(collector_end(&pow_pkg)){
        fprintf(stderr, "[Collector]: End PowerPkg error.\n");
        return 1;
    }
    if(collector_end(&pow_dram)){
        fprintf(stderr, "[Collector]: End PowerDram error.\n");
        return 1;
    }
    printf("\nTstart=%ld.%06ld[s], Tend=%ld.%06ld[s], MeanPowerPkg=%f[W]"
           "\nTstart=%ld.%06ld[s], Tend=%ld.%06ld[s], MeanPowerDram=%f[W]\n",
           pow_pkg.start.tv_sec, pow_pkg.start.tv_usec,
           pow_pkg.end.tv_sec, pow_pkg.end.tv_usec, pow_pkg.mean_val*NCPU,
           pow_dram.start.tv_sec, pow_dram.start.tv_usec,
           pow_dram.end.tv_sec, pow_dram.end.tv_usec, pow_dram.mean_val*NCPU);

    /*
     * Cleanup collectors.
     */
    if(collector_clean(&pow_pkg)){
        fprintf(stderr, "[Collector]: Clean PowerPkg error.\n");
        return 1;
    }
    if(collector_clean(&pow_dram)){
        fprintf(stderr, "[Collector]: Clean PowerDram error.\n");
        return 1;
    }
    /* NOTE(review): mqtt_topic buffers are deliberately not freed here —
     * it is unclear whether collector_clean() releases them; confirm
     * ownership before adding free() calls (process exit reclaims them
     * anyway). */
    return 0;
}
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*blur_view,
*edge_view,
*image_view;
double
normalize,
**kernel;
Image
*blur_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (fabs(sigma) < MagickEpsilon)
return(blur_image);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, blur, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image,exception);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image,exception);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
(size_t) (width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
kernel[i][(k-1)/2]+=(double) (1.0-normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively blur image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
register const Quantum
*magick_restrict r;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i;
ssize_t
center,
j;
j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5);
if (j < 0)
j=0;
else
if (j > (ssize_t) width)
j=(ssize_t) width;
if ((j & 0x01) != 0)
j--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
(ssize_t) ((width-j)/2L),width-j,width-j,exception);
if (p == (const Quantum *) NULL)
break;
center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
GetPixelChannels(image)*((width-j)/2);
for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
register const double
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
k=kernel[j];
pixels=p;
pixel=0.0;
gamma=0.0;
if ((blur_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(blur_image);
r+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A near-zero sigma means no sharpening: return the clone unchanged.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The result serves below as a per-pixel "distance from an edge" map; a
    failed BlurImage() is tolerated and the un-blurred edge image is used.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only
    the even slots kernel[0], kernel[2], ... are populated; kernel[i] is a
    (width-i) x (width-i) negated Gaussian whose center tap is replaced by
    -2 * (sum of all taps), giving a sharpening operator.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* (k-1)/2 indexes the center tap of this square kernel. */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  /*
    An early break above means an allocation failed mid-loop: release the
    kernels that were built and bail out.
  */
  if (i < (ssize_t) width)
    {
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image: one parallel pass over the output rows.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Derive the kernel slot from the edge map: high edge intensity
        yields a small j (large kernel, stronger sharpening); flat areas
        yield a large j (small kernel).  j is clamped to [0,width] and
        forced even so kernel[j] is a populated slot.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* Offset of the window's center pixel within the p[] neighborhood. */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if ((sharp_traits & CopyPixelTrait) != 0)
          {
            /* Channel is copied verbatim rather than convolved. */
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each kernel tap by the source alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *kernel_info;

  /*
    Gaussian blur via two chained 1-D convolutions: a "blur" kernel in
    the horizontal direction followed by the same kernel rotated +90
    degrees for the vertical direction.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated implementation when it succeeds.
  */
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  Image
    *convolve_image;

  /*
    Apply a custom convolution kernel to the image.  Validate parameters
    the same way every sibling effect method in this file does
    (BlurImage, EdgeImage, ...); previously this entry point skipped the
    checks and the debug trace entirely.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the hardware-accelerated path when OpenCL is available. */
  convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
  if (convolve_image != (Image *) NULL)
    return(convolve_image);
#endif
  /* Fall back to the generic morphology engine with a convolve method. */
  convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,
    exception);
  return(convolve_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  /*
    One hull pass of the Crimmins speckle-removal filter.  f and g are
    (columns+2) x (rows+2) buffers -- a one-pixel border surrounds the
    image data (see DespeckleImage, which sizes them this way).
    (x_offset,y_offset) selects the neighbor direction; polarity selects
    raising (>0) or lowering (<=0) of pixel values.  Results end up back
    in f; g serves as scratch between the two sub-passes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the top border row: p/q point at the first interior row. */
  p=f+(columns+2);
  q=g+(columns+2);
  /* r is p shifted by the neighbor offset. */
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* Index of row y's first interior pixel in the bordered layout. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      /* Raise pixels at least 2 steps below the offset neighbor. */
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      /* Lower pixels at least 2 steps above the offset neighbor. */
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second sub-pass: compare each value in g against both the offset
    neighbor (r) and its mirror (s), writing the final values into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /* The four neighbor directions (each also used negated) for Hull(). */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffers: each carries a one-pixel border on every
    side, hence the (columns+2) x (rows+2) size.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy this channel into the bordered pixels buffer; j walks the
      bordered layout (the extra j++ before and after each row skips
      the left and right border columns).
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /*
      Complementary hulling: raise dark speckles (polarity 1), then
      lower bright ones (polarity -1), along each direction and its
      opposite.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Copy the filtered channel back out of the bordered buffer.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    n;

  size_t
    taps,
    width;

  /*
    Detect edges by convolving with a square kernel whose taps are all
    -1.0 except the center tap, which is (width*height)-1 so the whole
    kernel sums to zero.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Start from a zeroed kernel description and fill in only the fields
    the convolution needs.
  */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  taps=kernel_info->width*kernel_info->height;
  for (n=0; n < (ssize_t) taps; n++)
    kernel_info->values[n]=(-1.0);
  kernel_info->values[taps/2]=(double) kernel_info->width*
    kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build the directional emboss kernel: Gaussian-weighted taps with a
    -8.0 factor where u < 0 or v < 0 and +8.0 elsewhere; the `u != k`
    test (k steps down one per row) zeroes every tap off one diagonal,
    which gives the relief its direction.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Scale the kernel by the reciprocal of its tap sum (guarded against
    zero by PerceptibleReciprocal) so the taps sum to unity.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  /* Equalize to spread the relief over the full intensity range. */
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
% The format of the GaussianBlurImage method is:
%
% Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *kernel_info;

  /*
    Blur with a single 2-D Gaussian kernel described by a
    "gaussian:<radius>x<sigma>" kernel-geometry string.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  /*
    Rec. 709 luma of a channel-indexed pixel array: the RGB components
    are located through the image's channel map and combined with the
    standard 709 weights.
  */
  const double
    blue=pixel[image->channel_map[BluePixelChannel].offset],
    green=pixel[image->channel_map[GreenPixelChannel].offset],
    red=pixel[image->channel_map[RedPixelChannel].offset];

  return((MagickRealType) (0.212656f*red+0.715158f*green+0.072186f*blue));
}
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=(size_t) radius+1;
  /* The filter samples from a pre-blurred copy of the input. */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      /*
        Examine the four width x width quadrants that meet at (x,y) and
        remember (in target) the one with the smallest luma variance.
      */
      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        /* Position the quadrant: NW, NE, SW, or SE of (x,y). */
        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* Per-channel mean over the quadrant. */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* Luma variance of the quadrant about the mean luma. */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /* An early break above means a pixel fetch failed. */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /*
        Output pixel is interpolated at the center of the most
        homogeneous (lowest-variance) quadrant.
      */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,      /* vertically blurred luma, with `width` pixels of horizontal padding on each side */
    *scanLinePixels,  /* per-thread scratch scanline (one slot per OpenMP thread) */
    totalWeight;      /* normalization for the triangular-weighted blur window */

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated implementation when one is available. */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    radius is a percentage: 100% maps to a blur half-width of 20% of the
    largest image dimension (0.002 per percentage point).
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  /* One padded scanline per OpenMP thread. */
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  /* Approximate normalization for the ramped (triangular) weights below. */
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: for every column, blur the luma channel into interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      /* Fetch the column with `width` rows of virtual padding at each end. */
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /* Cache the luma of the whole padded column. */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Weights ramp up toward the window center, then back down. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur interImage rows, then apply the unsharp-style gain.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Copy the padded, vertically blurred row into the thread scanline. */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        /*
          Amplify the difference between the pixel's luma and the blurred
          mask, scaled by strength (a percentage).
          NOTE(review): srcVal == 0 makes the division below ill-defined
          (inf/NaN mult) — confirm black pixels are acceptable here.
        */
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
            q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
            mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*
            mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
const double sigma)
{
MagickRealType
*kernel,
normalize;
register ssize_t
i;
/*
Generate a 1-D convolution kernel.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
width,sizeof(*kernel)));
if (kernel == (MagickRealType *) NULL)
return(kernel);
normalize=0.0;
for (i=0; i < (ssize_t) width; i++)
{
kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
MagickSigma)))/(MagickSQ2PI*MagickSigma));
normalize+=kernel[i];
}
for (i=0; i < (ssize_t) width; i++)
kernel[i]/=normalize;
return(kernel);
}
/*
  Simulate motion blur: convolve each pixel along a line of offsets at the
  requested angle with a normalized 1-D Gaussian kernel.
*/
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;   /* per-tap pixel offsets along the blur direction */

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /* Consistency fix: sibling effects validate the exception signature too. */
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Unit direction vector scaled by kernel width. */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
  if (blur_image != (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return(blur_image);
    }
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register MagickRealType
          *magick_restrict k;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /* Channel is not blended; copy it through unchanged. */
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /* No alpha blending: straight weighted sum along the offsets. */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /* Alpha-weighted sum; gamma renormalizes by accumulated alpha. */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters. This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImages method is:
%
% Image *PreviewImages(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  /*
    Build one tile per iteration, each with a stronger parameter setting;
    the middle tile (i == NumberTiles/2) is the unmodified thumbnail.
  */
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        /* Center tile: the original, highlighted with a light matte color. */
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
          (double) colors);
        break;
      }
      case DespecklePreview:
      {
        /* Apply despeckle i times by repeatedly replacing the thumbnail. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /* NOTE(review): case 4 is absent, so tile 4 falls into the default
           branch; confirm against upstream whether this is intentional. */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /* NOTE(review): BilevelImage is applied to `thumbnail`, not the
           clone that becomes the tile — verify this is the intended target. */
        (void) BilevelImage(thumbnail,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
          (double) (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        RectangleInfo
          raise;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        raise.width=(size_t) (2*i+2);
        raise.height=(size_t) (2*i+2);
        raise.x=(i-1)/2;
        raise.y=(i-1)/2;
        (void) RaiseImage(preview_image,&raise,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
          raise.height,(double) raise.x,(double) raise.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        /* NOTE(review): the label says "charcoal" for the oil-paint
           preview — looks like a copy/paste from CharcoalDrawingPreview. */
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        /* Round-trip through a temporary JPEG file to show quality loss. */
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o angle: the angle of the radial blur.
%
% o blur: the blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,   /* precomputed cosines of the sample angles */
    offset,
    *sin_theta,   /* precomputed sines of the sample angles */
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;            /* number of angular samples */

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated implementation when one is available. */
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /* Rotate samples about the image center. */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /* Sample count grows with the angle and the blur radius. */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) ||
      (sin_theta == (double *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Center the sweep so samples span [-angle/2, +angle/2]. */
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /* Pixels close to the center need fewer samples: stride the table. */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /* Channel is not blended; copy it through unchanged. */
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) ||
            (channel == AlphaPixelChannel))
          {
            /* No alpha weighting: plain average of the rotated samples. */
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /* Alpha-weighted average; gamma renormalizes by accumulated alpha. */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a width x width Gaussian kernel (aligned allocation; released on
    every exit path below).
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /*
        Log the kernel, one formatted row per line.
      */
      char
        format[MagickPathExtent],
        *message;

      register const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      /*
        Fix: release the kernel on this early-exit path too; it was
        previously leaked when the clone failed.
      */
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    The luminance clone supplies the intensity used for the contrast test.
  */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /* offset of the kernel-center pixel inside the padded row window */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict l,
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      register ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /* channel is not blurred: copy the source value through */
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Non-blended channel: weight only neighbors whose luminance
              contrast with the center is below threshold.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* no neighbor passed the contrast test: keep the original */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Blended channel: additionally weight each neighbor by its alpha.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;   /* light-direction vector derived from azimuth/elevation */

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* NOTE(review): despite the name, linear_image is a plain clone here; no
     colorspace transform is applied in this function. */
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* fetch a 3-row window (y-1..y+1) with one column of padding each side */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.
        pre/center/post point at the current pixel within the rows above,
        at, and below y; normal.x/normal.y are intensity differences of the
        west-vs-east and south-vs-north neighbors in that 3x3 window.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;   /* flat surface: lit by the Z component only */
      else
        {
          /* shade = (normal . light) / |normal| when facing the light */
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            /* channel is not shaded: copy the source value through */
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray mode: output the shade factor itself */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a width x width sharpening kernel by hand: negative Gaussian
    surround with a strong positive center, then normalize.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* wipe the acquired structure and fill in only the fields used below */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  /* negated Gaussian taps; accumulate their (negative) sum in normalize */
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /* i == width*width here, so i/2 is the center tap (width is odd) */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /* renormalize so the kernel taps sum to ~1 */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a square area defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,
% const PixelInterpolateMethod method,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: interpolation method.
%
% o radius: choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;   /* one RNG per OpenMP thread */

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* only parallelize when the secret key is the default (reproducibility) */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /* sample a random offset in [-width/2,+width/2) around (x,y) */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double gain,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
/* This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
*/
  /* start from a Gaussian-blurred copy; it is sharpened in place below */
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* threshold is a fraction of QuantumRange */
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        if ((unsharp_traits & CopyPixelTrait) != 0)
          {
            /* channel is not sharpened: copy the source value through */
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* pixel = original - blurred; below-threshold differences are
           left unchanged, others get original + gain*difference */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
|
GB_unop__identity_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_uint64)
// op(A') function: GB (_unop_tran__identity_uint16_uint64)
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint16_uint64)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = (uint16_t) Ax [p]: apply the identity op with a
    // uint64 -> uint16 typecast, for all anz entries.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint16_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = identity (cast (A')): the transpose loop body lives in the
    // included template, specialized by the GB_* macros defined above.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__bget_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int16)
// C=scalar+B GB (_bind1st__bget_int16)
// C=scalar+B' GB (_bind1st_tran__bget_int16)
// C=A+scalar GB (_bind2nd__bget_int16)
// C=A'+scalar GB (_bind2nd_tran__bget_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = GB_BITGET (aij, bij, int16_t, 16)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, int16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT16 || GxB_NO_BGET_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no C += A+B dense ewise3 kernel exists for the BGET
// operator (the generated name is "(none)"), so this definition is
// compiled out with #if 0.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense: the loop body comes from the
// shared template, specialized by the GB_BINOP/GB_CTYPE macros above.
void GB (_Cdense_ewise3_noaccum__bget_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B is sliced into B_ntasks
// tasks (B_ek_slicing) and processed with B_nthreads threads.
GrB_Info GB (_Cdense_accumB__bget_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar, applied with the BGET
// operator via the shared template.  NOTE: this file is auto-generated;
// the generator emitted a duplicate, unreachable "return (GrB_SUCCESS)"
// inside the inner block.  The dead inner return is removed here so the
// function matches the structure of _Cdense_accumB above (single return
// after the block).
GrB_Info GB (_Cdense_accumb__bget_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no A*D column-scale kernel is generated for BGET
// (name "(none)"), so this definition is compiled out with #if 0.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no D*B row-scale kernel is generated for BGET
// (name "(none)"), so this definition is compiled out with #if 0.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (set union of patterns), optionally masked by M
// (complemented if Mask_comp, structural-only if Mask_struct).  When
// is_eWiseUnion is true, alpha/beta scalars substitute for entries
// missing from A or B respectively.
GrB_Info GB (_AaddB__bget_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix workspaces for slicing M, A, and B by entry; freed by
// GB_FREE_WORKSPACE (defined in the template) on all exit paths
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
// only eWiseUnion supplies the alpha/beta fill scalars
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (set intersection of patterns) where C
// is sparse or hypersparse, with optional mask M.
GrB_Info GB (_AemultB_08__bget_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hypersparse and B
// is bitmap/full.  BGET is non-commutative (GB_BINOP_FLIP is 1), so the
// flipxy flag selects between f(x,y) and f(y,x) at compile time via the
// GB_FLIPPED macro.
GrB_Info GB (_AemultB_02__bget_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hypersparse and
// both A and B are bitmap/full; the mask M is sliced by entry.
GrB_Info GB (_AemultB_04__bget_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is held as a bitmap, with optional mask M
// (complemented if Mask_comp, structural-only if Mask_struct).
GrB_Info GB (_AemultB_bitmap__bget_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the BGET operator with the scalar bound as the first argument:
// Cx [k] = bitget (x, Bx [k]) for every entry present in B.
GrB_Info GB (_bind1st__bget_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
const int16_t x = (*((const int16_t *) x_input)) ;
const int16_t *Bx = (const int16_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only entries present in the bitmap Bb are computed
if (GBB (Bb, k))
{
int16_t bk = GBX (Bx, k, false) ;
Cx [k] = GB_BITGET (x, bk, int16_t, 16) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the BGET operator with the scalar bound as the second argument:
// Cx [k] = bitget (Ax [k], y) for every entry present in A.
GrB_Info GB (_bind2nd__bget_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
const int16_t *Ax = (const int16_t *) Ax_input ;
const int16_t y = (*((const int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in the bitmap Ab are computed
if (GBB (Ab, k))
{
int16_t ak = GBX (Ax, k, false) ;
Cx [k] = GB_BITGET (ak, y, int16_t, 16) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP: cij = bitget (x, aij); used by the transpose template
// below (no typecasting despite the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, int16_t, 16) ; \
}
// C = op (x, A'): transpose A while applying the operator with the
// scalar x bound as the first argument.
GrB_Info GB (_bind1st_tran__bget_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for later code; A and B share the same type here,
// so this redefinition is textually identical to the one above
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP: cij = bitget (aij, y); used by the transpose template
// below (no typecasting despite the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, int16_t, 16) ; \
}
// C = op (A', y): transpose A while applying the operator with the
// scalar y bound as the second argument.
GrB_Info GB (_bind2nd_tran__bget_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
stream_heap.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
# include <stdlib.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means the each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 100000000 /* kyungsan : increased 10 times */
/*# define STREAM_ARRAY_SIZE 10000000 */
#endif
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 10
#endif
#endif
#ifndef NTIMES
# define NTIMES 10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
static STREAM_TYPE* a = NULL;
static STREAM_TYPE* b = NULL;
static STREAM_TYPE* c = NULL;
/*
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
*/
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
/* STREAM driver (heap-allocation variant): allocate a/b/c, report the
   configuration, calibrate the timer, run the four kernels (Copy,
   Scale, Add, Triad) NTIMES times, and print best-rate statistics. */
int
main()
{
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
/* Heap variant: arrays are malloc'd instead of the static globals in
   the original STREAM (see the commented-out declarations above). */
printf("Alloc heap start \n");
double arr_size = sizeof(STREAM_TYPE) * (STREAM_ARRAY_SIZE+OFFSET);
/* NOTE(review): arr_size is a double passed to malloc; exact for sizes
   below 2^53 bytes, but size_t would be the conventional type. */
a = malloc(arr_size);
b = malloc(arr_size);
c = malloc(arr_size);
if(!a || !b || !c){
printf("Alloc heap failure \n");
exit(1);
}
printf("Alloc heap done \n");
printf(HLINE);
#ifdef N
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
/* report the thread count OpenMP was asked for */
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
/* count the threads that actually execute the parallel region */
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n",k);
#endif
/* Get initial value for system clock. */
/* First-touch initialization in parallel, so pages land on the NUMA
   node of the thread that will use them. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
/* estimate timer granularity; fall back to 1 us if finer than that */
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
/* time one full sweep over a[] to estimate per-test duration */
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
/* kernel 0: Copy, c = a */
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
/* kernel 1: Scale, b = scalar*c */
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
#endif
times[1][k] = mysecond() - times[1][k];
/* kernel 2: Add, c = a+b */
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
#endif
times[2][k] = mysecond() - times[2][k];
/* kernel 3: Triad, a = b+scalar*c */
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
/* best rate uses the minimum time, per the STREAM run rules */
printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
free(a);
free(b);
free(c);
return 0;
}
# define M 20
/* Estimate the system timer granularity: collect M strictly
   increasing timestamps and return the smallest gap between
   consecutive samples, in microseconds (clamped at zero). */
int
checktick()
{
int i;
double samples[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
double start = mysecond();
double now;
while (((now = mysecond()) - start) < 1.0E-6)
; /* spin until the clock visibly advances */
samples[i] = now;
}
/* The minimum difference between consecutive samples is our
   estimate (in microseconds) of the clock granularity. */
int best = 1000000;
for (i = 1; i < M; i++) {
int gap = (int) (1.0E6 * (samples[i] - samples[i-1]));
best = MIN(best, MAX(gap, 0));
}
return best;
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
/* Validate a[], b[], c[] against scalar recurrences that reproduce the
   kernel sequence exactly, then compare average absolute error per
   element against a type-dependent epsilon.  Relies on the abs() macro
   defined just above (works on doubles, unlike stdlib abs()). */
void checkSTREAMresults ()
{
STREAM_TYPE aj,bj,cj,scalar;
STREAM_TYPE aSumErr,bSumErr,cSumErr;
STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
double epsilon;
ssize_t j;
int k,ierr,err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
/* scalar recurrence mirrors Copy/Scale/Add/Triad in order */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
aSumErr += abs(a[j] - aj);
bSumErr += abs(b[j] - bj);
cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
/* tolerance depends on float (4 bytes) vs double (8 bytes) */
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
}
else {
/* NOTE(review): %lu assumes size_t == unsigned long; %zu would be
   the portable specifier -- confirm target platforms. */
printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
/* per-array check: report average error, then count and (with
   VERBOSE) list the individual failing elements */
if (abs(aAvgErr/aj) > epsilon) {
err++;
printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(a[j]/aj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf(" For array a[], %d errors were found.\n",ierr);
}
if (abs(bAvgErr/bj) > epsilon) {
err++;
printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(b[j]/bj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf(" For array b[], %d errors were found.\n",ierr);
}
if (abs(cAvgErr/cj) > epsilon) {
err++;
printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(c[j]/cj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf(" For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
/* Untuned stub for the Copy kernel (c = a); replace with a tuned
   implementation when compiling with -DTUNED. */
void tuned_STREAM_Copy()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
}
/* Untuned stub for the Scale kernel (b = scalar*c). */
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
}
/* Untuned stub for the Add kernel (c = a+b). */
void tuned_STREAM_Add()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
}
/* Untuned stub for the Triad kernel (a = b+scalar*c). */
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
jac_cpu.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "func.h"
/* Jacobi CPU benchmark driver: build an (N x N) grid with boundary
 * conditions and a heat-source term, run jac_cpu (from func.h) for
 * max_iter sweeps, and print footprint / Gflops / bandwidth stats.
 *
 * Usage: prog [interior_size iterations]; defaults 128 and 5000.
 *
 * Fixes over the original:
 *  - printf("%g\t", 0) passed an int where %g requires a double
 *    (undefined behavior); now passes 0.0.
 *  - allocation sizes use size_t instead of int, so N*N*sizeof(double)
 *    cannot overflow a 32-bit int for large grids.
 */
int main(int argc, char *argv[]) {
    int max_iter, N, i, j;
    if (argc == 3) {
        N = atoi(argv[1]) + 2;      /* +2 for the boundary layers */
        max_iter = atoi(argv[2]);
    }
    else {
        /* use default N */
        N = 128 + 2;
        max_iter = 5000;
    }
    double delta = 2.0/N;           /* grid spacing */
    /* allocate the source term f and the two solution buffers */
    double *f, *u, *u_old;
    size_t size_f = (size_t)N * N * sizeof(double);
    size_t size_u = (size_t)N * N * sizeof(double);
    size_t size_u_old = (size_t)N * N * sizeof(double);
    f = (double *)malloc(size_f);
    u = (double *)malloc(size_u);
    u_old = (double *)malloc(size_u_old);
    if (f == NULL || u == NULL || u_old == NULL) {
        fprintf(stderr, "memory allocation failed!\n");
        return(1);
    }
    /* initialize the source term and the boundary values:
       f = 200 inside the rectangle [N/2, 2N/3] x [N/6, N/3], else 0;
       u = 20 on the top/bottom/right boundary, 0 elsewhere */
#pragma omp parallel shared(f,u,u_old,N) private(i,j)
    {
#pragma omp for
        for (i = 0; i < N; i++){
            for (j = 0; j < N; j++){
                if (i >= N * 0.5 && i <= N * 2.0/3.0 && j >= N * 1.0/6.0 && j <= N * 1.0/3.0)
                    f[i*N + j] = 200.0;
                else
                    f[i*N + j] = 0.0;
                if (i == (N - 1) || i == 0 || j == (N - 1)){
                    u[i*N + j] = 20.0;
                    u_old[i*N + j] = 20.0;
                }
                else{
                    u[i*N + j] = 0.0;
                    u_old[i*N + j] = 0.0;
                }
            }
        }
    } /* end of parallel region */
    /* run and time the solver */
    double time_compute = omp_get_wtime();
    jac_cpu(N, delta, max_iter, f, u, u_old);
    double tot_time_compute = omp_get_wtime() - time_compute;
    /* stats: 10 flops per interior point per iteration */
    double GB = 1.0e-09;
    double flop = max_iter * (double)(N-2) * (double)(N-2) * 10.0;
    double gflops = (flop / tot_time_compute) * GB;
    double memory = (double)size_f + (double)size_u + (double)size_u_old;
    double memoryGBs = memory * GB * (1 / tot_time_compute);
    printf("%g\t", memory);           // footprint
    printf("%g\t", gflops);           // Gflops
    printf("%g\t", memoryGBs);        // bandwidth GB/s
    printf("%g\t", tot_time_compute); // total time
    printf("%g\t", 0.0);              // I/O time (was int 0: UB for %g)
    printf("%g\t", tot_time_compute); // compute time
    printf("# cpu\n");
    //write_result(u, N, delta, "./../../analysis/pos/jac_cpu.txt");
    /* free mem */
    free(f);
    free(u);
    free(u_old);
    return(0);
}
|
depend-4.c | #include <stdlib.h>
#include <unistd.h>
/* OpenMP conformance test for the task depend clause combined with
   if(0): a task with dependences but no predecessors must run
   immediately when its if-clause is false, while one with possible
   predecessors may still be deferred until its dependences resolve.
   (x starts at 1, so "if (x > 10)" is an if(0) clause.) */
int
main ()
{
#pragma omp parallel
#pragma omp single
{
int x = 1, y = 2, z = 3;
#pragma omp taskgroup
{
#pragma omp task shared (x, y, z) depend(inout: x, y) \
depend (in: z) if (x > 10)
{
if (x != 1 || y != 2 || z != 3)
abort ();
x = 4;
y = 5;
}
/* The above task has depend clauses, but no dependencies
on earlier tasks, and is if (0), so must be scheduled
immediately. */
if (x != 4 || y != 5)
abort ();
}
#pragma omp taskgroup
{
/* two reader tasks: sleep so the writer task below has real
   dependences to wait on */
#pragma omp task shared (x, y) depend(in: x, y)
{
usleep (10000);
if (x != 4 || y != 5 || z != 3)
abort ();
}
#pragma omp task shared (x, y) depend(in: x, y)
{
usleep (10000);
if (x != 4 || y != 5 || z != 3)
abort ();
}
#pragma omp task shared (x, y, z) depend(inout: x, y) \
depend (in: z) if (x > 10)
{
if (x != 4 || y != 5 || z != 3)
abort ();
x = 6;
y = 7;
}
/* The above task has depend clauses, and may have dependencies
on earlier tasks, while it is if (0), it can be deferred. */
}
/* taskgroup end guarantees the writer finished before this check */
if (x != 6 || y != 7)
abort ();
}
return 0;
}
|
polybench.c | /**
* polybench.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
/* Return wall-clock time in seconds (gettimeofday), or 0.0 when timing
   is disabled (POLYBENCH_TIME not defined).  */
static
double rtclock()
{
#ifdef POLYBENCH_TIME
    struct timeval Tp;
    int stat;
    stat = gettimeofday (&Tp, NULL);
    if (stat != 0)
      /* Fix: report failures on stderr (with a newline) so the message
         cannot corrupt the timing value printed on stdout.  */
      fprintf (stderr, "Error return from gettimeofday: %d\n", stat);
    return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
#else
    return 0;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
/* Read the x86 time-stamp counter via the RDTSC instruction
   (x86/x86-64 only).  RDTSC returns the 64-bit cycle counter split
   across EDX:EAX, bound here to cycles_hi:cycles_lo.  */
static
unsigned long long int rdtsc()
{
unsigned long long int ret = 0;
unsigned int cycles_lo;
unsigned int cycles_hi;
__asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
/* Recombine the two 32-bit halves into one 64-bit count.  */
ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
return ret;
}
#endif
/* Evict the last-level cache by streaming through a cache-sized array,
   so timed kernels start from a cold cache.  The summation exists only
   to keep the reads from being optimized away.  */
void polybench_flush_cache()
{
  /* Number of doubles covering POLYBENCH_CACHE_SIZE_KB of cache.  */
  int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
  double* flush = (double*) calloc (cs, sizeof(double));
  int i;
  double tmp = 0.0;
  if (flush == NULL)
    return;  /* out of memory: skip the flush rather than crash */
#ifdef _OPENMP
  /* Fix: reduction(+:tmp) is required; without it every thread updates
     the shared tmp concurrently, which is a data race (UB).  */
#pragma omp parallel for reduction(+:tmp)
#endif
  for (i = 0; i < cs; i++)
    tmp += flush[i];
  /* calloc zero-fills, so the sum must be 0 (bound leaves slack).  */
  assert (tmp <= 10.0);
  free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
/* Switch the process to the real-time SCHED_FIFO policy at maximum
   priority to limit OS scheduling noise during measurement.
   NOTE(review): the sched_setscheduler return value is ignored, so a
   non-root run silently keeps the default policy -- confirm intended.  */
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
   as root, and this works only for Linux kernels. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
sched_setscheduler (0, SCHED_FIFO, &schedParam);
}
/* Restore the default time-sharing policy (SCHED_OTHER) after timing.  */
void polybench_linux_standard_scheduler()
{
/* Restore to standard scheduler policy. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
/* Print a diagnostic for a failed PAPI call and exit(1).
   FILE/LINE locate the call site, CALL names the PAPI function, and
   RETVAL is its return code (PAPI errors are negative, PAPI_OK is 0).
   NOTE(review): retval == 0 is reported both as "SKIPPED" above and as
   "Error" below, which looks inconsistent -- confirm against the PAPI
   testsuite's test_fail utility this was copied from.  */
static
void test_fail(char *file, int line, char *call, int retval)
{
char buf[128];
memset(buf, '\0', sizeof(buf));
if (retval != 0)
fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
else
{
fprintf (stdout,"%-40s SKIPPED\n", file);
fprintf (stdout,"Line # %d\n", line);
}
if (retval == PAPI_ESYS)
{
/* System-level failure: append errno text via perror.  */
sprintf (buf, "System error in %s", call);
perror (buf);
}
else if (retval > 0)
fprintf (stdout,"Error: %s\n", call);
else if (retval == 0)
fprintf (stdout,"Error: %s\n", call);
else
{
/* Negative PAPI error code: translate to a message string.
   NOTE(review): three-argument PAPI_perror is the pre-5.4 PAPI API.  */
char errstring[PAPI_MAX_STR_LEN];
PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
fprintf (stdout,"Error in %s: %s\n", call, errstring);
}
fprintf (stdout,"\n");
if (PAPI_is_initialized ())
PAPI_shutdown ();
exit (1);
}
/* Initialize the PAPI library and create the event set, on the single
   monitored thread only (polybench_papi_counters_threadid).  Under
   OpenMP the thread id is first clamped to the actual thread count;
   without OpenMP the body runs unconditionally.  */
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
/* Clamp the monitored thread id to a valid thread.  */
if (omp_get_max_threads () < polybench_papi_counters_threadid)
polybench_papi_counters_threadid = omp_get_max_threads () - 1;
}
#pragma omp barrier
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
polybench_papi_eventset = PAPI_NULL;
if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
/* Translate the configured event names into PAPI event codes;
   the list is 0-terminated for the iteration in papi_print.  */
int k;
for (k = 0; _polybench_papi_eventlist[k]; ++k)
{
if ((retval =
PAPI_event_name_to_code (_polybench_papi_eventlist[k],
&(polybench_papi_eventlist[k])))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
}
polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
/* Destroy the PAPI event set and shut the library down, executed only
   on the monitored thread (mirrors polybench_papi_init).  */
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
if (PAPI_is_initialized ())
PAPI_shutdown ();
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
/* Flush the cache, then add event EVID to the event set and start
   counting on the monitored thread.  Always returns 0; failures abort
   via test_fail.  */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval = 1;
char descr[PAPI_MAX_STR_LEN];
PAPI_event_info_t evinfo;
/* descr/evinfo are looked up but only used for their error checks.  */
PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
if (PAPI_add_event (polybench_papi_eventset,
polybench_papi_eventlist[evid]) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
return 0;
}
/* Stop counting, record the value of event EVID into
   polybench_papi_values[evid], and remove the event from the event set
   so the next start_counter can add a different one.  */
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
long_long values[1];
values[0] = 0;
if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_read", retval);
if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
polybench_papi_values[evid] = values[0];
if ((retval = PAPI_remove_event
(polybench_papi_eventset,
polybench_papi_eventlist[evid])) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
/* Print all collected counter values (space separated) on stdout,
   with NAME= prefixes when POLYBENCH_PAPI_VERBOSE is defined.
   NOTE(review): the #endif after the verbose printf closes the
   "# ifdef _OPENMP" above, not the verbose block -- confusing but the
   braces balance in both configurations.  NOTE(review): %llu is used
   for long_long values, which PAPI declares signed -- confirm.  */
void polybench_papi_print()
{
int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num() == polybench_papi_counters_threadid)
{
#ifdef POLYBENCH_PAPI_VERBOSE
verbose = 1;
#endif
if (verbose)
printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
int evid;
/* Event code list is 0-terminated (see polybench_papi_init).  */
for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
{
if (verbose)
printf ("%s=", _polybench_papi_eventlist[evid]);
printf ("%llu ", polybench_papi_values[evid]);
if (verbose)
printf ("\n");
}
printf ("\n");
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
/* Pre-measurement setup: flush the cache (unless disabled) and, when
   configured, switch to the FIFO real-time scheduler.  Called by
   polybench_timer_start.  */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_fifo_scheduler ();
#endif
}
/* Start the measurement: prepare instruments, then record the start
   timestamp -- seconds by default, cycles when
   POLYBENCH_CYCLE_ACCURATE_TIMER is defined.  */
void polybench_timer_start()
{
//printf("In polybench timer start\n");
polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_start = rtclock ();
#else
polybench_c_start = rdtsc ();
#endif
}
/* Stop the measurement: record the end timestamp (matching the start
   timer's unit) and restore the standard scheduler if the FIFO
   scheduler was enabled.  */
void polybench_timer_stop()
{
//printf("In polybench timer stop\n");
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_end = rtclock ();
#else
polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_standard_scheduler ();
#endif
}
/* Print the measured result on stdout: GFLOP/s when POLYBENCH_GFLOPS
   is defined, elapsed cycles with the cycle-accurate timer, or elapsed
   seconds otherwise.  */
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
  /* Fix: the flop counter is declared as polybench_program_total_flops
     (see its definition above); the name __polybench_program_total_flops
     used here previously does not exist, so this branch never compiled. */
  if (polybench_program_total_flops == 0)
    {
      printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
      printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
    }
  else
    printf ("%0.2lf\n",
	    (polybench_program_total_flops /
	     (double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
  /* Fix: %Ld is non-standard; the difference of two unsigned long long
     counters needs %llu.  */
  printf ("%llu\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
/* Allocate NUM bytes aligned on a 32-byte boundary; on any failure,
   print a message on stderr and terminate the program.  Never returns
   NULL.  */
static
void *
xmalloc (size_t num)
{
  void *ptr = NULL;
  if (posix_memalign (&ptr, 32, num) != 0 || ptr == NULL)
    {
      fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
      exit (1);
    }
  return ptr;
}
/* Allocate an n-element array of elt_size-byte elements, 32-byte
   aligned (via xmalloc), aborting on failure.  The byte count
   n * elt_size is now checked for overflow before allocation
   (resolves the old "FIXME: detect overflow!").  */
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
  size_t val;
  /* Reject nonsensical element sizes and products exceeding SIZE_MAX:
     an overflowed request would silently allocate a tiny buffer and
     cause out-of-bounds writes later.  */
  if (elt_size <= 0 || n > SIZE_MAX / (unsigned long long int) elt_size)
    {
      fprintf (stderr, "[PolyBench] polybench_alloc_data: size overflow");
      exit (1);
    }
  val = (size_t) n * (size_t) elt_size;
  return xmalloc (val);
}
|
omp_cherk_batch.c | /**
* @file omp_cherk_batch.c
*
* @brief BBLAS cherk_batch float _Complex routine.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @generated from ./bblas_omp/omp_zherk_batch.c normal z -> c, Mon Jun 6 09:44:14 2016
**/
#endif
#include<cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define COMPLEX
/**
Purpose
-------
<b>cherk_batch</b> is an OpenMP version of cherk_batch.
It performs the matrix-matrix operations
arrayC[i] = alpha[i]*arrayA[i]*arrayA[i]**H + beta[i]*arrayC[i], or
arrayC[i] = alpha[i]*arrayA[i]**H *arrayA[i] + beta[i]*arrayC[i],
where alpha[i] and beta[i] are real scalars, arrayC[i] are matrices with
an N[i] by N[i] hermitian matrix and arrayA[i] are N[i] by K[i] matrices in the first
case and a K[i] by N[i] in the second case.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayC, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayC, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of uplo[0], trans[0], N[0], K[0],
alpha[0], beta[0], lda[0], and ldc[0] are used for all computations.
Parameters
----------
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the upper or
lower triangular part of the matrix arrayC[i]
is to be referenced as follows:
- = 'BblasUpper' Only the upper triangular part of
arrayC[i] is to be referenced.
- = 'BblasLower' Only the lower triangular part of
arrayC[i] is to be referenced.
@param[in]
trans Array of <tt>enum BBLAS_TRANS</tt>.
On entry, trans[i] specifies the operation to be
performed as follows:
- = 'BblasNoTrans' arrayC[i] = alpha[i]*arrayA[i]*arrayA[i]**H + beta[i]*arrayC[i].
- = 'BblasConjTrans' arrayC[i] = alpha[i]*arrayA[i]**H *arrayA[i] + beta[i]*arrayC[i].
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of rows and columns of the matrix
arrayC[i]. N[i] must be greater than zero.
@param[in]
K Array of <tt>int</tt>.
On entry with trans[i] = 'BblasNoTrans', K[i] specifies the
number of columns of the matrix arrayA[i],
and upon entry with trans[i] = 'BblasConjTrans',
K[i] specifies the number of rows of the matrix arrayA[i].
K[i] must be greater than zero.
@param[in]
alpha Array of <tt>float</tt>.
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a COMPLEX matrix of
dimension lda[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayA[i] must contain the elements of arrayA[i], otherwise
the leading K[i] by N[i] part of the arrayA[i] must contain the
elements of arrayA[i].
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
lda[i] must be at least max( 1, N[i] ), otherwise lda[i] must be at
least max( 1, K[i] ).
@param[in]
beta Array of <tt>float</tt>.
When beta[i] is set to zero arrayC[i] need not be set on input.
@param[in,out]
arrayC Array of pointers.
Each element arrayC[i] is a pointer to a COMPLEX matrix of
dimension ldc[i] by N[i].
Before entry with uplo[i] = 'BblasUpper', the leading
N[i] by N[i] upper triangular part of the arrayC[i] must con-
tain the upper triangular part of the hermitian
matrix and the strictly lower triangular part of arrayC[i]
is not referenced. On exit, the upper triangular
part of the arrayC[i] is overwritten by the upper tri-
angular part of the updated matrix.
Before entry with uplo[i] = 'BblasLower', the leading N[i] by N[i] lower
triangular part of the arrayC[i] must contain the lower
triangular part of the hermitian matrix and the
strictly upper triangular part of arrayC[i] is not refer-
enced. On exit, the lower triangular part of the
arrayC[i] is overwritten by the lower triangular part
of the updated matrix.
Note that the imaginary parts of the diagonal elements need not be set,
they are assumed to be zero,
and on exit they are set to zero.
@param[in]
ldc Array of <tt>int</tt>.
On entry, ldc[i] specifies the first dimension of arrayC[i] as declared
in the calling (sub) program. Each element ldc must be at least max( 1, N[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith cherk in the batch,
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
/* OpenMP batched HERK: for each problem i in the batch, computes
   arrayC[i] = alpha[i]*op(arrayA[i])*op(arrayA[i])**H + beta[i]*arrayC[i]
   by dispatching one cblas_cherk call per problem across OpenMP
   threads.  BBLAS_FIXED batches validate the shared (index 0)
   parameters once; BBLAS_VARIABLE batches validate each problem
   independently and record a per-problem status in info[].  See the
   doxygen comment above for the full parameter contract.  */
void omp_cherk_batch(
    const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *trans,
    const int *N, const int *K, const float *alpha,
    const BBLAS_Complex32_t **arrayA, const int *lda,
    const float *beta, BBLAS_Complex32_t **arrayC,
    const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
    /* Local variables */
    int first_index = 0;
    int batch_iter;
    int LDA;
    char func_name[15] = "cherk_batch";
    /* Check input arguments */
    if (batch_count < 0)
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* Fixed batch: all problems share the index-0 parameters, so
           validate them once and fail the whole batch on error.  */
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower))
        {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if ((trans[first_index] != BblasNoTrans) &&
            (trans[first_index] != BblasTrans) &&
            (trans[first_index] != BblasConjTrans))
        {
            xerbla_batch(func_name, BBLAS_ERR_TRANS, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_TRANS;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        if (K[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_K, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_K;
            }
            return;
        }
        /* Minimum leading dimension of A depends on which operand
           shape is in use (N x K for NoTrans, K x N otherwise).  */
        if (trans[first_index] == BblasNoTrans)
        {
            LDA = N[first_index];
        } else
        {
            LDA = K[first_index];
        }
        if (lda[first_index] < max(1, LDA)){
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                /* Bug fix: this previously wrote info[first_index],
                   setting only entry 0 on every iteration and leaving
                   info[1..batch_count-1] uninitialized.  */
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldc[first_index] < max(1, N[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDC;
            }
            return;
        }
        /* particular case: quick return when the update is a no-op */
        if (N[first_index] == 0 ||
            ((K[first_index] == 0 || alpha[first_index] == (float)0.0) &&
            (beta[first_index] == (float)1.0)))
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
#pragma omp parallel for private(batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /*Call to cblas_cherk */
            cblas_cherk(
                BblasColMajor,
                uplo[first_index],
                trans[first_index],
                N[first_index],
                K[first_index],
                alpha[first_index],
                arrayA[batch_iter],
                lda[first_index],
                beta[first_index],
                arrayC[batch_iter],
                ldc[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /*END FIXED SIZE FOR LOOP */
    }else if (batch_opts == BBLAS_VARIABLE)
    {
        /* Variable batch: each problem carries its own parameters, so
           validate per problem and skip (not abort) invalid entries.  */
#pragma omp parallel for private(batch_iter, LDA)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Check input arguments */
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower))
            {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if ((trans[batch_iter] != BblasNoTrans) &&
                (trans[batch_iter] != BblasTrans) &&
                (trans[batch_iter] != BblasConjTrans))
            {
                xerbla_batch(func_name, BBLAS_ERR_TRANS, batch_iter);
                info[batch_iter] = BBLAS_ERR_TRANS;
                continue;
            }
            if (N[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            if (K[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_K, batch_iter);
                info[batch_iter] = BBLAS_ERR_K;
                continue;
            }
            if (trans[batch_iter] == BblasNoTrans){
                LDA = N[batch_iter];
            }
            else
            {
                LDA = K[batch_iter];
            }
            if (lda[batch_iter] < max(1, LDA)){
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            if (ldc[batch_iter] < max(1, N[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDC;
                continue;
            }
            /* particular case: no-op update for this problem */
            if (N[batch_iter] == 0 ||
                ((K[batch_iter] == 0 || alpha[batch_iter] == (float)0.0) &&
                (beta[batch_iter] == (float)1.0)))
            {
                info[batch_iter] = BBLAS_SUCCESS;
                continue;
            }
            cblas_cherk(
                BblasColMajor,
                uplo[batch_iter],
                trans[batch_iter],
                N[batch_iter],
                K[batch_iter],
                alpha[batch_iter],
                arrayA[batch_iter],
                lda[batch_iter],
                beta[batch_iter],
                arrayC[batch_iter],
                ldc[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    }else
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef COMPLEX
|
GB_binop__max_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint16)
// A*D function (colscale): GB (_AxD__max_uint16)
// D*A function (rowscale): GB (_DxB__max_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint16)
// C=scalar+B GB (_bind1st__max_uint16)
// C=scalar+B' GB (_bind1st_tran__max_uint16)
// C=A+scalar GB (_bind2nd__max_uint16)
// C=A'+scalar GB (_bind2nd_tran__max_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT16 || GxB_NO_MAX_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense.  The loop lives in the
// shared template, specialized via the GB_* macros above
// (GB_BINOP is GB_IMAX on uint16_t).  Auto-generated: do not edit.
void GB (_Cdense_ewise3_accum__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense (no accumulation);
// body supplied by the shared template, specialized by the
// GB_* macros above.  Auto-generated: do not edit.
void GB (_Cdense_ewise3_noaccum__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C with the MAX op.
// Returns GrB_NO_VALUE when this specialization is disabled at compile
// time (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB (_Cdense_accumB__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into
// a dense matrix C.  NOTE: the second "return (GrB_SUCCESS)" is
// unreachable generated boilerplate (the inner block returns first).
GrB_Info GB (_Cdense_accumb__max_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D using the
// MAX operator; Cx is the raw uint16_t value array of C, filled by the
// colscale template.
GrB_Info GB (_AxD__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D using the MAX
// operator; mirror image of _AxD above, via the rowscale template.
GrB_Info GB (_DxB__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M or !M) with the MAX op.
// For eWiseUnion, alpha/beta scalars (type-erased inputs) substitute
// for entries missing from A or B respectively; for plain eWiseAdd the
// scalars are left unset and unused by the template.
GrB_Info GB (_AaddB__max_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces declared here are freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (optionally masked) where C is
// sparse or hypersparse; the numeric work is in the emult_08 template.
GrB_Info GB (_AemultB_08__max_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for MAX (commutative), so the
// flipxy argument needs no special handling and the unflipped template
// is always used.
GrB_Info GB (_AemultB_02__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full; work done by the emult_04 template.
GrB_Info GB (_AemultB_04__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is held
// in bitmap form; work done by the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__max_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Bind the scalar x as the FIRST operand: Cx [p] = max (x, Bx [p]) for
// every entry present in B (per the Bb bitmap).  Cx and Bx may alias.
GrB_Info GB (_bind1st__max_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap of B are computed
        if (GBB (Bb, p))
        {
            uint16_t bij = GBX (Bx, p, false) ;
            Cx [p] = GB_IMAX (x, bij) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Bind the scalar y as the SECOND operand: Cx [p] = max (Ax [p], y)
// for every entry present in A (per the Ab bitmap).  Cx and Ax may
// alias.
GrB_Info GB (_bind2nd__max_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap of A are computed
        if (GBB (Ab, p))
        {
            uint16_t aij = GBX (Ax, p, false) ;
            Cx [p] = GB_IMAX (aij, y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes
// Cx [pC] = max (x, A (i,j)) with the scalar bound first.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the MAX operator with the
// scalar x as the first operand; work done by the transpose template.
GrB_Info GB (_bind1st_tran__max_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes
// Cx [pC] = max (A (i,j), y) with the scalar bound second.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = op (A', y): transpose A and apply the MAX operator with the
// scalar y as the second operand; work done by the transpose template.
GrB_Info GB (_bind2nd_tran__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_uint32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_int32
// op(A') function: GB_tran__abs_uint32_int32
// C type: uint32_t
// A type: int32_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
// Type and operator macros for C = abs (A), typecasting int32_t -> uint32_t.
// These are consumed by the kernels below and by GB_unaryop_transpose.c.
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// Cx [p] is the p-th entry of the output array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the unary operator entry-wise, in parallel.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop__abs_uint32_int32
(
    uint32_t *Cx,       // output array; may alias Ax
    int32_t *Ax,        // input array
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // Cx [k] = op (cast (Ax [k]))
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// shared transpose kernel; phase 2 fills C with op (cast (A'))
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, RowMajor> {
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// res += alpha * lhs * rhs for a ROW-major result: since (A*B)^T = B^T*A^T,
// swap the operands, flip each operand's storage order, and run the
// column-major kernel -- no data is copied, only the interpretation changes.
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar *lhs, Index lhsStride,
const RhsScalar *rhs, Index rhsStride,
ResScalar *res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar, LhsScalar> &blocking,
GemmParallelInfo <Index> *info = 0) {
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking, info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ColMajor> {
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// res += alpha * lhs * rhs for a COLUMN-major result, using Goto-style cache
// blocking: the depth is cut into kc-sized panels, the lhs into mc x kc
// blocks packed into blockA, the rhs into kc x cols panels packed into
// blockB, and the packed pieces are multiplied by the gebp micro-kernel.
// When `info` is non-null this runs inside an existing OpenMP parallel
// region and the threads cooperatively pack/share B.
static void run(Index rows, Index cols, Index depth,
const LhsScalar *_lhs, Index lhsStride,
const RhsScalar *_rhs, Index rhsStride,
ResScalar *res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar, RhsScalar> &blocking,
GemmParallelInfo <Index> *info = 0) {
const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs, lhsStride);
const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs, rhsStride);
typedef gebp_traits<LhsScalar, RhsScalar> Traits;
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows, blocking.mc()); // cache block size along the M direction
//Index nc = blocking.nc(); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
std::size_t sizeA = kc*mc;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
RhsScalar* blockB = blocking.blockB();
eigen_internal_assert(blockB!=0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing A'.
pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
// Pack B_k to B' in a parallel fashion:
// each thread packs the sub block B_k,j to B'_j where j is the thread id.
// However, before copying to B'_j, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
// NOTE(review): this is a busy-wait on a shared counter; presumably the
// GemmParallelInfo fields are declared volatile/atomic elsewhere -- confirm.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
// Notify the other threads that the part B'_j is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per B'_j
for(Index shift=0; shift<threads; ++shift)
{
Index j = (tid+shift)%threads;
// At this point we have to make sure that B'_j has been updated by the thread j,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if(shift>0)
while(info[j].sync!=k) {}
gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
}
// Then keep going as usual with the remaining A'
for(Index i=mc; i<rows; i+=mc)
{
const Index actual_mc = (std::min)(i+mc,rows)-i;
// pack A_i,k to A'
pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
// C_i += A' * B'
gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
}
// Release all the sub blocks B'_j of B' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index j=0; j<threads; ++j)
{
#pragma omp atomic
info[j].users -= 1;
}
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc * mc;
std::size_t sizeB = kc * cols;
std::size_t sizeW = kc * Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1)
for (Index k2 = 0; k2 < depth; k2 += kc) {
const Index actual_kc = (std::min)(k2 + kc, depth) - k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack rhs's panel into a sequential chunk of memory (L2 caching)
// Note that this panel will be read as many times as the number of blocks in the lhs's
// vertical panel which is, in practice, a very low number.
pack_rhs(blockB, &rhs(k2, 0), rhsStride, actual_kc, cols);
// For each mc x kc block of the lhs's vertical panel...
// (==GEPP_VAR1)
for (Index i2 = 0; i2 < rows; i2 += mc) {
const Index actual_mc = (std::min)(i2 + mc, rows) - i2;
// We pack the lhs's block into a sequential chunk of memory (L1 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro vertical panel of the large rhs's panel (e.g., cols/4 times).
pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
// Everything is packed, we can now call the block * panel kernel:
gebp(res + i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
}
}
}
}
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// GeneralProduct<Lhs,Rhs,GemmProduct> simply inherits its traits from the
// generic ProductBase expression it derives from.
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct < Lhs, Rhs, GemmProduct> >
: traits<ProductBase < GeneralProduct < Lhs, Rhs, GemmProduct>, Lhs, Rhs> > {
};
// Functor handed to parallelize_gemm: it captures the operands, destination,
// scale factor and blocking object so the parallel driver can invoke the
// selected GEMM kernel on a sub-range of rows/columns.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor {
gemm_functor(const Lhs &lhs, const Rhs &rhs, Dest &dest, const Scalar &actualAlpha,
BlockingType &blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {}
// Called once before the threads start: pre-allocate the shared packed-B
// buffer so all worker threads can reuse it.
void initParallelSession() const {
m_blocking.allocateB();
}
// Run the product on the row block [row, row+rows) and column block
// [col, col+cols); cols == -1 means "all remaining columns of rhs".
void operator()(Index row, Index rows, Index col = 0, Index cols = -1, GemmParallelInfo <Index> *info = 0) const {
if (cols == -1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
/*(const Scalar*)*/&m_lhs.coeffRef(row, 0), m_lhs.outerStride(),
/*(const Scalar*)*/&m_rhs.coeffRef(0, col), m_rhs.outerStride(),
(Scalar *) &(m_dest.coeffRef(row, col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
protected:
const Lhs &m_lhs;
const Rhs &m_rhs;
Dest &m_dest;
Scalar m_actualAlpha;
BlockingType &m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor = 1,
bool FiniteAtCompileTime = MaxRows != Dynamic && MaxCols != Dynamic && MaxDepth != Dynamic>
class gemm_blocking_space;
// Base class holding the cache-blocking state shared by all GEMM paths:
// pointers to the packed blocks (A, B, and the gebp workspace W) and the
// block sizes mc (rows), nc (cols) and kc (depth).
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking {
// NOTE(review): these typedefs are private and unused in this class;
// presumably kept for symmetry with derived gemm_blocking_space -- confirm.
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar *m_blockA;
RhsScalar *m_blockB;
RhsScalar *m_blockW;
DenseIndex m_mc;
DenseIndex m_nc;
DenseIndex m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0) {}
inline DenseIndex mc() const { return m_mc; }
inline DenseIndex nc() const { return m_nc; }
inline DenseIndex kc() const { return m_kc; }
inline LhsScalar *blockA() { return m_blockA; }
inline RhsScalar *blockB() { return m_blockB; }
inline RhsScalar *blockW() { return m_blockW; }
};
// Fixed-size specialization (all of MaxRows/MaxCols/MaxDepth known at compile
// time): the packed buffers live in aligned member arrays, so the allocate*()
// hooks are no-ops. A row-major destination swaps the roles of lhs and rhs
// (Transpose) to match the transposition trick used by the kernels above.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, true>
: public level3_blocking<
typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type,
typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type> {
enum {
Transpose = StorageOrder == RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar;
typedef gebp_traits <LhsScalar, RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth,
SizeW = MaxDepth * Traits::WorkSpaceFactor
};
EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
public:
gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/) {
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
this->m_blockW = m_staticW;
}
// storage is static -- nothing to allocate
inline void allocateA() {}
inline void allocateB() {}
inline void allocateW() {}
inline void allocateAll() {}
};
// Dynamic-size specialization: block sizes are computed at run time from the
// actual problem dimensions, and the packed buffers are heap-allocated lazily
// (allocateA/B/W) and released in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type,
typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type> {
enum {
Transpose = StorageOrder == RowMajor
};
typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar;
typedef gebp_traits <LhsScalar, RhsScalar> Traits;
DenseIndex m_sizeA;
DenseIndex m_sizeB;
DenseIndex m_sizeW;
public:
gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth) {
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
// shrinks mc/nc/kc to cache-friendly sizes; also fixes the buffer sizes below
computeProductBlockingSizes<LhsScalar, RhsScalar, KcFactor>(this->m_kc, this->m_mc, this->m_nc);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
m_sizeW = this->m_kc * Traits::WorkSpaceFactor;
}
// lazy allocation: each buffer is created at most once
void allocateA() {
if (this->m_blockA == 0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB() {
if (this->m_blockB == 0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateW() {
if (this->m_blockW == 0)
this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
}
void allocateAll() {
allocateA();
allocateB();
allocateW();
}
~gemm_blocking_space() {
// aligned_delete is safe on still-null pointers
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
aligned_delete(this->m_blockW, m_sizeW);
}
};
} // end namespace internal
// High-level wrapper for "large" GEMM: extracts scalar factors and storage
// orders from the operands, selects the matching general_matrix_matrix_product
// kernel, and runs it (possibly in parallel) via parallelize_gemm.
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
: public ProductBase<GeneralProduct<Lhs, Rhs, GemmProduct>, Lhs, Rhs> {
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime, Rhs::MaxRowsAtCompileTime)
};
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef Scalar ResScalar;
GeneralProduct(const Lhs &lhs, const Rhs &rhs) : Base(lhs, rhs) {
#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
typedef internal::scalar_product_op <LhsScalar, RhsScalar> BinOp;
// NOTE: "COMPATIBILIY" is the macro's actual (misspelled) name in Eigen;
// do not "correct" it.
EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp, LhsScalar, RhsScalar);
#endif
}
// dst += alpha * lhs * rhs
template<typename Dest>
void scaleAndAddTo(Dest &dst, const Scalar &alpha) const {
eigen_assert(dst.rows() == m_lhs.rows() && dst.cols() == m_rhs.cols());
// empty product: nothing to accumulate
if (m_lhs.cols() == 0 || m_lhs.rows() == 0 || m_rhs.cols() == 0)
return;
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
// fold any scalar multiples wrapped around the operands into alpha
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags & RowMajorBit) ? RowMajor : ColMajor, LhsScalar, RhsScalar,
Dest::MaxRowsAtCompileTime, Dest::MaxColsAtCompileTime, MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor <
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (_ActualLhsType::Flags & RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (_ActualRhsType::Flags & RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags & RowMajorBit) ? RowMajor : ColMajor>,
_ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
// parallelize only when the problem can be large at compile time
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime > 32 || Dest::MaxRowsAtCompileTime == Dynamic)>(
GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags & RowMajorBit);
}
};
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
for-12.c | int foo (void)
{
int i, a;
a = 30;
#pragma omp parallel for lastprivate (a)
for (i = 0; i < 10; i++)
a = a + i;
return a;
}
|
particle_filter_tracking.h | // -*- mode: C++ -*-
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2013, Yuto Inagaki and JSK Lab
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the JSK Lab nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#ifndef JSK_PCL_ROS_PARTICLE_FILTER_TRACKING_H_
#define JSK_PCL_ROS_PARTICLE_FILTER_TRACKING_H_
// ros
#include <ros/ros.h>
#include <ros/names.h>
#include <sensor_msgs/PointCloud2.h>
#include <visualization_msgs/Marker.h>
#include <tf/transform_broadcaster.h>
#include <tf_conversions/tf_eigen.h>
#include <jsk_recognition_utils/pcl_conversion_util.h>
#include <jsk_topic_tools/connection_based_nodelet.h>
#include <jsk_recognition_utils/tf_listener_singleton.h>
#include <boost/circular_buffer.hpp>
// pcl
#include <pcl/point_types.h>
#include <pcl/common/centroid.h>
#include <pcl/common/transforms.h>
#include <pcl/search/pcl_search.h>
#include <pcl/common/transforms.h>
#include <pcl/tracking/tracking.h>
#include <pcl/tracking/particle_filter.h>
#include <pcl/tracking/kld_adaptive_particle_filter_omp.h>
#include <pcl/tracking/particle_filter_omp.h>
#include <pcl/tracking/coherence.h>
#include <pcl/tracking/distance_coherence.h>
#include <pcl/tracking/hsv_color_coherence.h>
#include <pcl/tracking/approx_nearest_pair_point_cloud_coherence.h>
#include <pcl/tracking/nearest_pair_point_cloud_coherence.h>
#include <jsk_recognition_msgs/SetPointCloud2.h>
#include <jsk_pcl_ros/ParticleFilterTrackingConfig.h>
#include <jsk_recognition_msgs/BoundingBox.h>
#include <dynamic_reconfigure/server.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/synchronizer.h>
#include <pcl/tracking/particle_filter.h>
#include <pcl/tracking/impl/tracking.hpp>
#include <pcl/tracking/impl/particle_filter.hpp>
#include <jsk_recognition_utils/time_util.h>
#include <std_msgs/Float32.h>
#include <jsk_recognition_utils/pcl_util.h>
#include <visualization_msgs/MarkerArray.h>
#include <jsk_recognition_msgs/TrackerStatus.h>
// This namespace follows PCL coding style
namespace pcl
{
namespace tracking
{
// hack pcl::tracking
// original tracker assumes that the number of reference points is smaller
// than the number of input.
// ReversedParticleFilterTracker assumes that the number of reference
// points is greater than the number of input.
// So we need to change:
// 1) transform the input pointcloud during the weight phase with the inverse of each particle;
// a particle keeps representing the pose of the reference, for simplicity.
// Particle-filter tracker for the "reversed" setting where the reference
// cloud is larger than the input: instead of transforming the reference by
// each particle, the INPUT cloud is transformed by the inverse of each
// particle before the coherence is evaluated (particles still represent the
// pose of the reference).
template <typename PointInT, typename StateT>
class ReversedParticleFilterTracker: public ParticleFilterTracker<PointInT, StateT>
{
public:
using Tracker<PointInT, StateT>::tracker_name_;
using Tracker<PointInT, StateT>::search_;
using Tracker<PointInT, StateT>::input_;
using Tracker<PointInT, StateT>::indices_;
using Tracker<PointInT, StateT>::getClassName;
using ParticleFilterTracker<PointInT, StateT>::particles_;
using ParticleFilterTracker<PointInT, StateT>::change_detector_;
using ParticleFilterTracker<PointInT, StateT>::change_counter_;
using ParticleFilterTracker<PointInT, StateT>::change_detector_resolution_;
using ParticleFilterTracker<PointInT, StateT>::change_detector_interval_;
using ParticleFilterTracker<PointInT, StateT>::use_change_detector_;
using ParticleFilterTracker<PointInT, StateT>::alpha_;
using ParticleFilterTracker<PointInT, StateT>::changed_;
using ParticleFilterTracker<PointInT, StateT>::ref_;
using ParticleFilterTracker<PointInT, StateT>::coherence_;
using ParticleFilterTracker<PointInT, StateT>::use_normal_;
using ParticleFilterTracker<PointInT, StateT>::particle_num_;
using ParticleFilterTracker<PointInT, StateT>::change_detector_filter_;
using ParticleFilterTracker<PointInT, StateT>::transed_reference_vector_;
//using ParticleFilterTracker<PointInT, StateT>::calcLikelihood;
using ParticleFilterTracker<PointInT, StateT>::normalizeWeight;
using ParticleFilterTracker<PointInT, StateT>::initParticles;
using ParticleFilterTracker<PointInT, StateT>::normalizeParticleWeight;
using ParticleFilterTracker<PointInT, StateT>::calcBoundingBox;
typedef Tracker<PointInT, StateT> BaseClass;
typedef typename Tracker<PointInT, StateT>::PointCloudIn PointCloudIn;
typedef typename PointCloudIn::Ptr PointCloudInPtr;
typedef typename PointCloudIn::ConstPtr PointCloudInConstPtr;
typedef typename Tracker<PointInT, StateT>::PointCloudState PointCloudState;
typedef typename PointCloudState::Ptr PointCloudStatePtr;
typedef typename PointCloudState::ConstPtr PointCloudStateConstPtr;
typedef PointCoherence<PointInT> Coherence;
typedef boost::shared_ptr< Coherence > CoherencePtr;
typedef boost::shared_ptr< const Coherence > CoherenceConstPtr;
typedef PointCloudCoherence<PointInT> CloudCoherence;
typedef boost::shared_ptr< CloudCoherence > CloudCoherencePtr;
typedef boost::shared_ptr< const CloudCoherence > CloudCoherenceConstPtr;
// call initCompute only if reference pointcloud is updated
// (the coherence's target is the reference here, not the input)
inline void
setReferenceCloud (const PointCloudInConstPtr &ref)
{
ref_ = ref;
if (coherence_) {
coherence_->setTargetCloud (ref_);
coherence_->initCompute ();
}
else {
PCL_ERROR("coherence_ is not yet available!");
}
}
protected:
// one transformed copy of the input cloud per particle (see initCompute)
std::vector<PointCloudInPtr> transed_input_vector_;
// Set up per-frame state; unlike the base class this allocates
// transed_input_vector_ (transformed INPUT clouds) instead of
// transed_reference_vector_.
virtual bool initCompute()
{
if (!Tracker<PointInT, StateT>::initCompute ())
{
PCL_ERROR ("[pcl::%s::initCompute] Init failed.\n", getClassName ().c_str ());
return (false);
}
// allocate pointclouds in transed_input_vector_ instead of transed_reference_vector_
if (transed_input_vector_.empty ())
{
//std::cout << "initializing " << particle_num_ << " input" << std::endl;
// only one time allocation
transed_input_vector_.resize (particle_num_ + 1);
for (int i = 0; i < particle_num_ + 1; i++)
{
transed_input_vector_[i] = PointCloudInPtr (new PointCloudIn ());
}
}
// set reference instead of input
// if (coherence_) {
// coherence_->setTargetCloud (ref_);
// }
if (!change_detector_)
change_detector_ = boost::shared_ptr<pcl::octree::OctreePointCloudChangeDetector<PointInT> >(new pcl::octree::OctreePointCloudChangeDetector<PointInT> (change_detector_resolution_));
if (!particles_ || particles_->points.empty ())
initParticles (true);
return (true);
}
// only computation WITHOUT normals is supported by this helper
void computeTransformedPointCloudWithoutNormal
(const StateT& hypothesis, PointCloudIn &cloud)
{
const Eigen::Affine3f trans = this->toEigenMatrix (hypothesis);
// destructively assigns to cloud
pcl::transformPointCloud<PointInT> (*input_, cloud, trans);
}
// Weighting step: for every particle, move the INPUT cloud by the
// particle's inverse pose and score it against the (fixed) reference via
// coherence_, then normalize all particle weights.
virtual void weight()
{
changed_ = true;
if (!use_normal_)
{
for (size_t i = 0; i < particles_->points.size (); i++)
{
//std::cout << "processing " << i << " particle: " << particles_->points[i].weight << std::endl;
// compute `inverse` of particle
StateT inverse_particle;
Eigen::Affine3f trans = particles_->points[i].toEigenMatrix();
Eigen::Affine3f inverse_trans = trans.inverse();
inverse_particle = StateT::toState(inverse_trans);
computeTransformedPointCloudWithoutNormal (inverse_particle, *transed_input_vector_[i]);
// NOTE(review): indices is left null here; presumably the coherence
// implementation tolerates a null indices pointer -- confirm.
IndicesPtr indices;
coherence_->compute (transed_input_vector_[i], indices, particles_->points[i].weight);
}
}
else
{
for (size_t i = 0; i < particles_->points.size (); i++)
{
StateT inverse_particle;
Eigen::Affine3f trans = particles_->points[i].toEigenMatrix();
Eigen::Affine3f inverse_trans = trans.inverse();
inverse_particle = StateT::toState(inverse_trans);
IndicesPtr indices (new std::vector<int>);
this->computeTransformedPointCloudWithNormal (inverse_particle, *indices, *transed_input_vector_[i]);
coherence_->compute (transed_input_vector_[i], indices, particles_->points[i].weight);
}
}
normalizeWeight ();
}
private:
};
// OpenMP-parallel variant of ReversedParticleFilterTracker: the per-particle
// weighting loop is distributed over threads_ OpenMP threads. Behavior is
// otherwise identical to the base class.
template <typename PointInT, typename StateT>
class ReversedParticleFilterOMPTracker: public ReversedParticleFilterTracker<PointInT, StateT>
{
public:
using Tracker<PointInT, StateT>::tracker_name_;
using Tracker<PointInT, StateT>::search_;
using Tracker<PointInT, StateT>::input_;
using Tracker<PointInT, StateT>::indices_;
using Tracker<PointInT, StateT>::getClassName;
using ParticleFilterTracker<PointInT, StateT>::particles_;
using ParticleFilterTracker<PointInT, StateT>::change_detector_;
using ParticleFilterTracker<PointInT, StateT>::change_counter_;
using ParticleFilterTracker<PointInT, StateT>::change_detector_resolution_;
using ParticleFilterTracker<PointInT, StateT>::change_detector_interval_;
using ParticleFilterTracker<PointInT, StateT>::use_change_detector_;
using ParticleFilterTracker<PointInT, StateT>::alpha_;
using ParticleFilterTracker<PointInT, StateT>::changed_;
using ParticleFilterTracker<PointInT, StateT>::ref_;
using ParticleFilterTracker<PointInT, StateT>::coherence_;
using ParticleFilterTracker<PointInT, StateT>::use_normal_;
using ParticleFilterTracker<PointInT, StateT>::particle_num_;
using ParticleFilterTracker<PointInT, StateT>::change_detector_filter_;
using ParticleFilterTracker<PointInT, StateT>::transed_reference_vector_;
//using ParticleFilterTracker<PointInT, StateT>::calcLikelihood;
using ParticleFilterTracker<PointInT, StateT>::normalizeWeight;
using ParticleFilterTracker<PointInT, StateT>::initParticles;
using ParticleFilterTracker<PointInT, StateT>::normalizeParticleWeight;
using ParticleFilterTracker<PointInT, StateT>::calcBoundingBox;
typedef Tracker<PointInT, StateT> BaseClass;
typedef typename Tracker<PointInT, StateT>::PointCloudIn PointCloudIn;
typedef typename PointCloudIn::Ptr PointCloudInPtr;
typedef typename PointCloudIn::ConstPtr PointCloudInConstPtr;
typedef typename Tracker<PointInT, StateT>::PointCloudState PointCloudState;
typedef typename PointCloudState::Ptr PointCloudStatePtr;
typedef typename PointCloudState::ConstPtr PointCloudStateConstPtr;
typedef PointCoherence<PointInT> Coherence;
typedef boost::shared_ptr< Coherence > CoherencePtr;
typedef boost::shared_ptr< const Coherence > CoherenceConstPtr;
typedef PointCloudCoherence<PointInT> CloudCoherence;
typedef boost::shared_ptr< CloudCoherence > CloudCoherencePtr;
typedef boost::shared_ptr< const CloudCoherence > CloudCoherenceConstPtr;
using ReversedParticleFilterTracker<PointInT, StateT>::transed_input_vector_;
protected:
unsigned int threads_; // number of OpenMP threads (0 = library default)
public:
// \param nr_threads number of OpenMP threads to use in weight()
ReversedParticleFilterOMPTracker (unsigned int nr_threads = 0)
: ReversedParticleFilterTracker<PointInT, StateT> ()
, threads_ (nr_threads)
{
tracker_name_ = "ReversedParticleFilterOMPTracker";
}
inline void
setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
protected:
// Weighting step: same computation as the base class, but the loop over
// particles runs in parallel; each particle writes only its own slot of
// transed_input_vector_ and its own weight.
virtual void weight()
{
changed_ = true;
if (!use_normal_)
{
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads_)
#endif
for (size_t i = 0; i < particles_->points.size (); i++)
{
// score the input cloud moved by the inverse of this particle's pose
StateT inverse_particle;
Eigen::Affine3f trans = particles_->points[i].toEigenMatrix();
Eigen::Affine3f inverse_trans = trans.inverse();
inverse_particle = StateT::toState(inverse_trans);
this->computeTransformedPointCloudWithoutNormal (inverse_particle, *transed_input_vector_[i]);
// NOTE(review): indices is left null; presumably the coherence
// implementation tolerates a null indices pointer -- confirm.
IndicesPtr indices;
coherence_->compute (transed_input_vector_[i], indices, particles_->points[i].weight);
}
}
else
{
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads_)
#endif
for (size_t i = 0; i < particles_->points.size (); i++)
{
StateT inverse_particle;
Eigen::Affine3f trans = particles_->points[i].toEigenMatrix();
Eigen::Affine3f inverse_trans = trans.inverse();
inverse_particle = StateT::toState(inverse_trans);
IndicesPtr indices (new std::vector<int>);
this->computeTransformedPointCloudWithNormal (inverse_particle, *indices, *transed_input_vector_[i]);
coherence_->compute (transed_input_vector_[i], indices, particles_->points[i].weight);
}
}
normalizeWeight ();
// (removed: a leftover debug loop iterated over all particles here with
// an empty body, doing no work)
}
private:
};
template <typename PointInT>
class CachedApproxNearestPairPointCloudCoherence: public ApproxNearestPairPointCloudCoherence<PointInT>
{
public:
typedef typename ApproxNearestPairPointCloudCoherence<PointInT>::PointCoherencePtr PointCoherencePtr;
typedef typename ApproxNearestPairPointCloudCoherence<PointInT>::PointCloudInConstPtr PointCloudInConstPtr;
//using NearestPairPointCloudCoherence<PointInT>::search_;
using ApproxNearestPairPointCloudCoherence<PointInT>::maximum_distance_;
using ApproxNearestPairPointCloudCoherence<PointInT>::target_input_;
using ApproxNearestPairPointCloudCoherence<PointInT>::point_coherences_;
using ApproxNearestPairPointCloudCoherence<PointInT>::coherence_name_;
using ApproxNearestPairPointCloudCoherence<PointInT>::new_target_;
using ApproxNearestPairPointCloudCoherence<PointInT>::getClassName;
using ApproxNearestPairPointCloudCoherence<PointInT>::search_;
/** \brief Constructor taking the bin sizes along x/y/z used to quantize
 *  query points for the nearest-neighbor cache. Larger bins mean more
 *  cache hits but a coarser nearest-neighbor approximation. */
CachedApproxNearestPairPointCloudCoherence (const double bin_x,
const double bin_y,
const double bin_z) :
ApproxNearestPairPointCloudCoherence<PointInT> (),
bin_x_(bin_x), bin_y_(bin_y), bin_z_(bin_z)
{
coherence_name_ = "CachedApproxNearestPairPointCloudCoherence";
}
protected:
/** \brief compute the nearest pairs and compute coherence using point_coherences_ */
virtual void
computeCoherence (const PointCloudInConstPtr &cloud, const IndicesConstPtr &indices, float &w_j)
{
boost::mutex::scoped_lock lock(cache_mutex_);
double val = 0.0;
//for (size_t i = 0; i < indices->size (); i++)
for (size_t i = 0; i < cloud->points.size (); i++)
{
PointInT input_point = cloud->points[i];
int xi, yi, zi;
computeBin(input_point.getVector3fMap(), xi, yi, zi);
int k_index;
if (checkCache(xi, yi, zi)) {
k_index = getCachedIndex(xi, yi, zi);
}
else {
float k_distance = 0.0; // dummy
search_->approxNearestSearch(input_point, k_index, k_distance);
registerCache(k_index, xi, yi, zi);
}
PointInT target_point = target_input_->points[k_index];
float dist = (target_point.getVector3fMap() - input_point.getVector3fMap()).norm();
if (dist < maximum_distance_)
{
double coherence_val = 1.0;
for (size_t i = 0; i < point_coherences_.size (); i++)
{
PointCoherencePtr coherence = point_coherences_[i];
double w = coherence->compute (input_point, target_point);
coherence_val *= w;
}
val += coherence_val;
}
}
w_j = - static_cast<float> (val);
//ROS_INFO("hit: %d", counter);
}
virtual void computeBin(
const Eigen::Vector3f& p, int& xi, int& yi, int& zi)
{
xi = (int)(p[0]/bin_x_);
yi = (int)(p[1]/bin_y_);
zi = (int)(p[2]/bin_z_);
}
virtual void registerCache(int k_index, int bin_x, int bin_y, int bin_z)
{
//boost::mutex::scoped_lock lock(cache_mutex_);
if (cache_.find(bin_x) == cache_.end()) {
cache_[bin_x] = std::map<int, std::map<int, int> >();
}
if (cache_[bin_x].find(bin_y) == cache_[bin_x].end()) {
cache_[bin_x][bin_y] = std::map<int, int>();
}
cache_[bin_x][bin_y][bin_z] = k_index;
}
virtual int getCachedIndex(int bin_x, int bin_y, int bin_z)
{
//boost::mutex::scoped_lock lock(cache_mutex_);
return cache_[bin_x][bin_y][bin_z];
}
virtual bool checkCache(int bin_x, int bin_y, int bin_z)
{
//boost::mutex::scoped_lock lock(cache_mutex_);
if (cache_.find(bin_x) == cache_.end()) {
return false;
}
else {
if (cache_[bin_x].find(bin_y) == cache_[bin_x].end()) {
return false;
}
else {
if (cache_[bin_x][bin_y].find(bin_z) == cache_[bin_x][bin_y].end()) {
return false;
}
else {
return true;
}
}
}
}
virtual void clearCache()
{
boost::mutex::scoped_lock lock(cache_mutex_);
cache_ = CacheMap();
}
virtual bool initCompute()
{
if (!ApproxNearestPairPointCloudCoherence<PointInT>::initCompute ())
{
PCL_ERROR ("[pcl::%s::initCompute] PointCloudCoherence::Init failed.\n", getClassName ().c_str ());
//deinitCompute ();
return false;
}
clearCache();
return true;
}
//typename boost::shared_ptr<pcl::search::Octree<PointInT> > search_;
typedef std::map<int, std::map<int, std::map<int, int> > > CacheMap;
CacheMap cache_;
boost::mutex cache_mutex_;
double bin_x_;
double bin_y_;
double bin_z_;
};
/** \brief NearestPairPointCloudCoherence specialization that searches the
 *  target cloud with pcl::search::OrganizedNeighbor instead of the base
 *  class's own search structure. */
template <typename PointInT>
class OrganizedNearestPairPointCloudCoherence: public NearestPairPointCloudCoherence<PointInT>
{
public:
  using NearestPairPointCloudCoherence<PointInT>::target_input_;
  using NearestPairPointCloudCoherence<PointInT>::new_target_;
  using NearestPairPointCloudCoherence<PointInT>::getClassName;
  typename boost::shared_ptr<pcl::search::OrganizedNeighbor<PointInT> > search_;
protected:
  /** \brief initialize the coherence computation: run the generic
   *  PointCloudCoherence init, lazily create the organized-neighbor
   *  search, and (re)bind it to the target cloud when a new target
   *  has been supplied. */
  virtual bool initCompute ()
  {
    // generic initialization must succeed first
    if (!PointCloudCoherence<PointInT>::initCompute ())
    {
      PCL_ERROR ("[pcl::%s::initCompute] PointCloudCoherence::Init failed.\n", getClassName ().c_str ());
      //deinitCompute ();
      return (false);
    }
    // lazily construct the organized-neighbor search structure
    if (!search_)
    {
      search_.reset (new pcl::search::OrganizedNeighbor<PointInT> (false));
    }
    // nothing further to do unless a fresh target cloud is pending
    if (!(new_target_ && target_input_))
    {
      return true;
    }
    search_->setInputCloud (target_input_);
    if (!search_->isValid ())
    {
      return false;
    }
    new_target_ = false;  // target consumed
    return true;
  }
};
}
}
using namespace pcl::tracking;
namespace jsk_pcl_ros
{
/** \brief Nodelet that tracks a target point cloud with a PCL particle
 *  filter (KLD-adaptive, OpenMP), optionally in "reversed" mode.
 *  Publishes particles, the tracking result pose, timing and error
 *  statistics.  Most tracker accesses go through the tracker_* wrapper
 *  methods so that the normal and reversed trackers can be swapped. */
class ParticleFilterTracking: public jsk_topic_tools::ConnectionBasedNodelet
{
public:
  typedef pcl::PointXYZRGB PointT;
  typedef ParticleFilterTrackingConfig Config;
  typedef message_filters::sync_policies::ExactTime<
    sensor_msgs::PointCloud2,
    jsk_recognition_msgs::BoundingBox > SyncPolicy;
  typedef message_filters::sync_policies::ExactTime<
    sensor_msgs::PointCloud2,
    sensor_msgs::PointCloud2 > SyncChangePolicy;
  typedef ParticleFilterTracker<PointT, ParticleXYZRPY>::PointCloudStatePtr
  PointCloudStatePtr;
  // Buffer capacities: 10 timing samples, 100 distance/angle error samples,
  // 10 booleans for the no-move decision.
  ParticleFilterTracking(): timer_(10), distance_error_buffer_(100), angle_error_buffer_(100), no_move_buffer_(10) {}
protected:
  // working clouds
  pcl::PointCloud<PointT>::Ptr cloud_pass_;
  pcl::PointCloud<PointT>::Ptr cloud_pass_downsampled_;
  pcl::PointCloud<PointT>::Ptr target_cloud_;
  //boost::shared_ptr<ParticleFilterTracker<PointT, ParticleXYZRPY> > tracker_;
  boost::shared_ptr<KLDAdaptiveParticleFilterOMPTracker<PointT, ParticleXYZRPY> > tracker_;
  boost::shared_ptr<ReversedParticleFilterOMPTracker<PointT, ParticleXYZRPY> > reversed_tracker_;
  //boost::shared_ptr<ReversedParticleFilterTracker<PointT, ParticleXYZRPY> > reversed_tracker_;
  boost::mutex mtx_;
  bool new_cloud_;
  bool track_target_set_;
  bool align_box_;
  bool change_frame_;
  std::string frame_id_;
  std::string base_frame_id_;
  std::string track_target_name_;
  ros::Time stamp_;
  ros::Time prev_stamp_;
  tf::Transform reference_transform_;
  // subscribers / publishers
  ros::Subscriber sub_;
  ros::Subscriber sub_update_model_;
  ros::Subscriber sub_update_with_marker_model_;
  ros::Publisher pub_latest_time_;
  ros::Publisher pub_average_time_;
  ros::Publisher pub_rms_distance_;
  ros::Publisher pub_rms_angle_;
  ros::Publisher pub_velocity_;
  ros::Publisher pub_velocity_norm_;
  ros::Publisher pub_no_move_;
  ros::Publisher pub_no_move_raw_;
  ros::Publisher pub_skipped_;
  ros::Publisher pub_change_cloud_marker_;
  ros::Publisher pub_tracker_status_;
  jsk_recognition_utils::WallDurationTimer timer_;
  Eigen::Affine3f initial_pose_;
  boost::circular_buffer<double> distance_error_buffer_;
  boost::circular_buffer<double> angle_error_buffer_;
  jsk_recognition_utils::SeriesedBoolean no_move_buffer_;
  message_filters::Subscriber<sensor_msgs::PointCloud2> sub_input_;
  message_filters::Subscriber<jsk_recognition_msgs::BoundingBox> sub_box_;
  message_filters::Subscriber<sensor_msgs::PointCloud2> sub_input_cloud_;
  message_filters::Subscriber<sensor_msgs::PointCloud2> sub_change_cloud_;
  boost::shared_ptr<message_filters::Synchronizer<SyncPolicy> > sync_;
  boost::shared_ptr<message_filters::Synchronizer<SyncChangePolicy> > change_sync_;
  ros::Publisher particle_publisher_;
  ros::Publisher track_result_publisher_;
  ros::Publisher pose_stamped_publisher_;
  ros::ServiceServer renew_model_srv_;
  boost::shared_ptr <dynamic_reconfigure::Server<Config> > srv_;
  tf::TransformListener* listener_;
  ////////////////////////////////////////////////////////
  // parameters
  ////////////////////////////////////////////////////////
  bool use_change_detection_;
  int max_particle_num_;
  double delta_;
  double epsilon_;
  int iteration_num_;
  double resample_likelihood_thr_;
  ParticleXYZRPY bin_size_;
  ParticleXYZRPY prev_result_;
  int counter_;
  std::vector<double> default_step_covariance_;
  bool reversed_;
  bool not_use_reference_centroid_;
  bool not_publish_tf_;
  int marker_to_pointcloud_sampling_nums_;
  double static_velocity_thr_;
  double change_cloud_near_threshold_;
  virtual void config_callback(Config &config, uint32_t level);
  virtual void publish_particles();
  virtual void publish_result();
  virtual std::string reference_frame_id();
  virtual void reset_tracking_target_model(
    const pcl::PointCloud<PointT>::ConstPtr &new_target_cloud);
  virtual tf::Transform change_pointcloud_frame(
    pcl::PointCloud<PointT>::Ptr cloud);
  /** \brief root-mean-square of the buffered samples.
   *  Returns 0.0 for an empty buffer (the original code divided by
   *  buffer.size() unconditionally, producing NaN in that case). */
  virtual double rms(boost::circular_buffer<double>& buffer) {
    if (buffer.size() == 0) {
      return 0.0;
    }
    double res = 0.0;
    for (size_t i = 0; i < buffer.size(); i++) {
      res += buffer[i] * buffer[i];
    }
    return sqrt(res / buffer.size());
  }
  virtual void cloud_cb(const sensor_msgs::PointCloud2 &pc);
  virtual void cloud_change_cb(const sensor_msgs::PointCloud2::ConstPtr &pc, const sensor_msgs::PointCloud2::ConstPtr &chnage_cloud);
  virtual bool renew_model_cb(
    jsk_recognition_msgs::SetPointCloud2::Request &req,
    jsk_recognition_msgs::SetPointCloud2::Response &response);
  virtual void renew_model_with_box_topic_cb(
    const sensor_msgs::PointCloud2::ConstPtr &pc_ptr,
    const jsk_recognition_msgs::BoundingBox::ConstPtr &bb_ptr);
  virtual void renew_model_topic_cb(const sensor_msgs::PointCloud2 &pc);
  virtual void renew_model_with_marker_topic_cb(const visualization_msgs::Marker &marker);
  virtual void publish_tracker_status(const std_msgs::Header& header,
                                      const bool is_tracking);
  ////////////////////////////////////////////////////////
  // Wrap particle filter methods
  ////////////////////////////////////////////////////////
  virtual void tracker_set_trans(const Eigen::Affine3f& trans);
  virtual void tracker_set_step_noise_covariance(
    const std::vector<double>& covariance);
  virtual void tracker_set_initial_noise_covariance(
    const std::vector<double>& covariance);
  virtual void tracker_set_initial_noise_mean(
    const std::vector<double>& mean);
  virtual void tracker_set_iteration_num(const int num);
  virtual void tracker_set_particle_num(const int num);
  virtual void tracker_set_resample_likelihood_thr(double thr);
  virtual void tracker_set_use_normal(bool use_normal);
  virtual void tracker_set_cloud_coherence(
    ApproxNearestPairPointCloudCoherence<PointT>::Ptr coherence);
  virtual void tracker_set_maximum_particle_num(int num);
  virtual void tracker_set_delta(double delta);
  virtual void tracker_set_epsilon(double epsilon);
  virtual void tracker_set_bin_size(const ParticleXYZRPY bin_size);
  virtual ParticleFilterTracker<PointT, ParticleXYZRPY>::PointCloudStatePtr
  tracker_get_particles();
  virtual ParticleXYZRPY tracker_get_result();
  virtual Eigen::Affine3f tracker_to_eigen_matrix(
    const ParticleXYZRPY& result);
  virtual pcl::PointCloud<PointT>::ConstPtr tracker_get_reference_cloud();
  virtual void tracker_set_reference_cloud(pcl::PointCloud<PointT>::Ptr ref);
  virtual void tracker_reset_tracking();
  virtual void tracker_set_input_cloud(pcl::PointCloud<PointT>::Ptr input);
  virtual void tracker_compute();
  virtual void subscribe() {}
  virtual void unsubscribe() {}
private:
  virtual void onInit();
};
}
#endif
//-----------------------------------------------------------------------------
// project.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/19/14 (Build 5.1.000)
// 04/14/14 (Build 5.1.004)
// 09/15/14 (Build 5.1.007)
// 03/19/15 (Build 5.1.008)
// 04/30/15 (Build 5.1.009)
// 08/01/16 (Build 5.1.011)
// 03/14/17 (Build 5.1.012)
// Author: L. Rossman
//
// Project management functions.
//
// This module provides project-related services such as:
// o opening a new project and reading its input data
// o allocating and freeing memory for project objects
// o setting default values for object properties and options
// o initializing the internal state of all objects
// o managing hash tables for identifying objects by ID name
//
// Build 5.1.004:
// - Ignore RDII option added.
//
// Build 5.1.007:
// - Default monthly adjustments for climate variables included.
// - User-supplied GW flow equaitions initialized to NULL.
// - Storage node exfiltration object initialized to NULL.
// - Freeing of memory used for storage node exfiltration included.
//
// Build 5.1.008:
// - Constants used for dynamic wave routing moved to dynwave.c.
// - Input processing of minimum time step & number of
// parallel threads for dynamic wave routing added.
// - Default values of hyd. conductivity adjustments added.
// - Freeing of memory used for outfall pollutant load added.
//
// Build 5.1.009:
// - Fixed bug in computing total duration introduced in 5.1.008.
//
// Build 5.1.011:
// - Memory management of hydraulic event dates array added.
//
// Build 5.1.012:
// - Minimum conduit slope option initialized to 0 (none).
// - NO/YES no longer accepted as options for NORMAL_FLOW_LIMITED.
//
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE
#include <stdlib.h>
#include <string.h>
#include <stdlib.h>
#include <math.h> //(5.1.008)
#if defined(_OPENMP)
#include <omp.h> //(5.1.008)
#else
int omp_get_num_threads(void) { return 1;}
#endif
#include "headers.h"
#include "lid.h"
#include "hash.h"
#include "mempool.h"
#include "swmm5.h"
//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------
//// Constants for DYNWAVE flow routing moved to dynwave.c. //// //(5.1.008)
//-----------------------------------------------------------------------------
// Shared variables
//-----------------------------------------------------------------------------
static HTtable* Htable[MAX_OBJ_TYPES]; // Hash tables for object ID names
static char MemPoolAllocated; // TRUE if memory pool allocated
//-----------------------------------------------------------------------------
// External Functions (declared in funcs.h)
//-----------------------------------------------------------------------------
// project_open (called from swmm_open in swmm5.c)
// project_close (called from swmm_close in swmm5.c)
// project_readInput (called from swmm_open in swmm5.c)
// project_readOption (called from readOption in input.c)
// project_validate (called from swmm_open in swmm5.c)
// project_init (called from swmm_start in swmm5.c)
// project_addObject (called from addObject in input.c)
// project_createMatrix (called from openFileForInput in iface.c)
// project_freeMatrix (called from iface_closeRoutingFiles)
// project_findObject
// project_findID
//-----------------------------------------------------------------------------
// Function declarations
//-----------------------------------------------------------------------------
static void initPointers(void);
static void setDefaults(void);
static void openFiles(char *f1, char *f2, char *f3);
static void createObjects(void);
static void deleteObjects(void);
static void createHashTables(void);
static void deleteHashTables(void);
//=============================================================================
void project_open(char *f1, char *f2, char *f3)
//
//  Input:   f1 = pointer to name of input file
//           f2 = pointer to name of report file
//           f3 = pointer to name of binary output file
//  Output:  none
//  Purpose: opens a new SWMM project.
//
{
    initPointers();          // null out all dynamic object arrays
    setDefaults();           // assign default values to all project options
    openFiles(f1, f2, f3);   // open the input, report & binary output files
}
//=============================================================================
void project_readInput()
//
//  Input:   none
//  Output:  none
//  Purpose: retrieves project data from input file.
//
{
    // --- create hash tables for fast retrieval of objects by ID names
    createHashTables();

    // --- count number of objects in input file and create them
    input_countObjects();
    createObjects();

    // --- read project data from input file
    input_readData();
    if ( ErrorCode ) return;

    // --- establish starting & ending date/time
    StartDateTime = StartDate + StartTime;
    EndDateTime   = EndDate + EndTime;
    ReportStart   = ReportStartDate + ReportStartTime;
    // reporting cannot begin before the simulation itself starts
    ReportStart   = MAX(ReportStart, StartDateTime);

    // --- check for valid starting & ending date/times
    if ( EndDateTime <= StartDateTime )
    {
        report_writeErrorMsg(ERR_START_DATE, "");
    }
    else if ( EndDateTime <= ReportStart )
    {
        report_writeErrorMsg(ERR_REPORT_DATE, "");
    }
    else
    {
////  Following code segment was modified for release 5.1.009.  ////           //(5.1.009)
////
        // --- compute total duration of simulation in seconds
        TotalDuration = floor((EndDateTime - StartDateTime) * SECperDAY);

        // --- reporting step must be <= total duration
        if ( (double)ReportStep > TotalDuration )
        {
            ReportStep = (int)(TotalDuration);
        }

        // --- reporting step can't be < routing step
        if ( (double)ReportStep < RouteStep )
        {
            report_writeErrorMsg(ERR_REPORT_STEP, "");
        }

        // --- convert total duration to milliseconds
        //     (TotalDuration is in ms from here on)
        TotalDuration *= 1000.0;
    }
////
}
//=============================================================================
void project_validate()
//
//  Input:   none
//  Output:  none
//  Purpose: checks validity of project data.
//
{
    int i;
    int j;
    int err;

    // --- validate Curves and TimeSeries
    for ( i=0; i<Nobjects[CURVE]; i++ )
    {
         err = table_validate(&Curve[i]);
         if ( err ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID);
    }
    for ( i=0; i<Nobjects[TSERIES]; i++ )
    {
        err = table_validate(&Tseries[i]);
        if ( err ) report_writeTseriesErrorMsg(err, &Tseries[i]);
    }

    // --- validate hydrology objects
    //     (NOTE: order is important !!!!)
    climate_validate();
    lid_validate();
    // no snowmelt/aquifer objects means those processes can be skipped
    if ( Nobjects[SNOWMELT] == 0 ) IgnoreSnowmelt = TRUE;
    if ( Nobjects[AQUIFER]  == 0 ) IgnoreGwater   = TRUE;
    for ( i=0; i<Nobjects[GAGE]; i++ )     gage_validate(i);
    for ( i=0; i<Nobjects[AQUIFER]; i++ )  gwater_validateAquifer(i);
    for ( i=0; i<Nobjects[SUBCATCH]; i++ ) subcatch_validate(i);
    for ( i=0; i<Nobjects[SNOWMELT]; i++ ) snow_validateSnowmelt(i);

    // --- compute geometry tables for each shape curve
    //     (j counts SHAPE_CURVE curves and indexes the parallel Shape array)
    j = 0;
    for ( i=0; i<Nobjects[CURVE]; i++ )
    {
        if ( Curve[i].curveType == SHAPE_CURVE )
        {
            Curve[i].refersTo = j;
            Shape[j].curve = i;
            if ( !shape_validate(&Shape[j], &Curve[i]) )
                report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID);
            j++;
        }
    }

    // --- validate links before nodes, since the latter can
    //     result in adjustment of node depths
    for ( i=0; i<Nobjects[NODE]; i++) Node[i].oldDepth = Node[i].fullDepth;
    for ( i=0; i<Nobjects[LINK]; i++) link_validate(i);
    for ( i=0; i<Nobjects[NODE]; i++) node_validate(i);

    // --- adjust time steps if necessary
    if ( DryStep < WetStep )
    {
        report_writeWarningMsg(WARN06, "");
        DryStep = WetStep;
    }
    if ( RouteStep > (double)WetStep )
    {
        report_writeWarningMsg(WARN07, "");
        RouteStep = WetStep;
    }

    // --- adjust individual reporting flags to match global reporting flag
    if ( RptFlags.subcatchments == ALL )
        for (i=0; i<Nobjects[SUBCATCH]; i++) Subcatch[i].rptFlag = TRUE;
    if ( RptFlags.nodes == ALL )
        for (i=0; i<Nobjects[NODE]; i++) Node[i].rptFlag = TRUE;
    if ( RptFlags.links == ALL )
        for (i=0; i<Nobjects[LINK]; i++) Link[i].rptFlag = TRUE;

    // --- validate dynamic wave options
    if ( RouteModel == DW ) dynwave_validate();                                //(5.1.008)

    // resolve NumThreads: 0 means "use all available"; otherwise cap at the
    // actual OpenMP team size
#pragma omp parallel                                                           //(5.1.008)
{
    if ( NumThreads == 0 ) NumThreads = omp_get_num_threads();                 //(5.1.008)
    else NumThreads = MIN(NumThreads, omp_get_num_threads());                  //(5.1.008)
}
    // small networks run faster single-threaded
    if ( Nobjects[LINK] < 4 * NumThreads ) NumThreads = 1;                     //(5.1.008)
}
//=============================================================================
void project_close()
//
//  Input:   none
//  Output:  none
//  Purpose: closes a SWMM project.
//
{
    deleteObjects();      // free all project objects & their memory pool
    deleteHashTables();   // then drop the ID-name hash tables
}
//=============================================================================
int  project_init(void)
//
//  Input:   none
//  Output:  returns an error code (the global ErrorCode)
//  Purpose: initializes the internal state of all objects.
//
{
    int j;
    climate_initState();
    lid_initState();
    for (j=0; j<Nobjects[TSERIES]; j++)  table_tseriesInit(&Tseries[j]);
    for (j=0; j<Nobjects[GAGE]; j++)     gage_initState(j);
    for (j=0; j<Nobjects[SUBCATCH]; j++) subcatch_initState(j);
    for (j=0; j<Nobjects[NODE]; j++)     node_initState(j);
    for (j=0; j<Nobjects[LINK]; j++)     link_initState(j);
    return ErrorCode;
}
//=============================================================================
int project_addObject(int type, char *id, int n)
//
//  Input:   type = object type
//           id   = object ID string
//           n    = object index
//  Output:  returns 0 if object already added, 1 if not, -1 if hashing fails
//  Purpose: adds an object ID to a hash table
//
{
    int    result;
    size_t len;
    char   *newID;

    // --- do nothing if object already placed in hash table
    if ( project_findObject(type, id) >= 0 ) return 0;

    // --- use memory from the hash tables' common memory pool to store
    //     a copy of the object's ID string
    len = strlen(id) + 1;
    newID = (char *) Alloc(len*sizeof(char));

    // --- treat pool-allocation failure like a hashing failure
    //     (the original code called strcpy on an unchecked pointer)
    if ( newID == NULL ) return -1;
    strcpy(newID, id);

    // --- insert object's ID into the hash table for that type of object
    result = HTinsert(Htable[type], newID, n);
    if ( result == 0 ) result = -1;
    return result;
}
//=============================================================================
int DLLEXPORT project_findObject(int type, char *id)
//
//  Input:   type = object type
//           id   = object ID
//  Output:  returns index of object with given ID, or -1 if ID not found
//  Purpose: uses hash table to find index of an object with a given ID.
//
{
    // thin wrapper over the per-type hash table
    return HTfind(Htable[type], id);
}
//=============================================================================
char  *project_findID(int type, char *id)
//
//  Input:   type = object type
//           id   = ID name being sought
//  Output:  returns pointer to location where object's ID string is stored
//           (NULL if not present)
//  Purpose: uses hash table to find address of given string entry.
//
{
    return HTfindKey(Htable[type], id);
}
//=============================================================================
double ** project_createMatrix(int nrows, int ncols)
//
//  Input:   nrows = number of rows (0-based)
//           ncols = number of columns (0-based)
//  Output:  returns a pointer to a matrix (NULL if allocation fails)
//  Purpose: allocates memory for a matrix of doubles.
//
//  NOTE: entries are stored in one contiguous block of nrows*ncols
//        doubles, zero-initialized; a[i] points into that block.
//        Release the matrix with project_freeMatrix().
//
{
    int i,j;
    double **a;

    // --- allocate pointers to rows
    a = (double **) malloc(nrows * sizeof(double *));
    if ( !a ) return NULL;

    // --- allocate rows and set pointers to them
    a[0] = (double *) malloc (nrows * ncols * sizeof(double));
    if ( !a[0] )
    {
        free(a);          // row-pointer array was leaked here originally
        return NULL;
    }
    for ( i = 1; i < nrows; i++ ) a[i] = a[i-1] + ncols;

    // --- zero-fill every entry
    for ( i = 0; i < nrows; i++)
    {
        for ( j = 0; j < ncols; j++) a[i][j] = 0.0;
    }

    // --- return pointer to array of pointers to rows
    return a;
}
//=============================================================================
void project_freeMatrix(double **a)
//
//  Input:   a = matrix of doubles created by project_createMatrix()
//  Output:  none
//  Purpose: frees memory allocated for a matrix of doubles.
//
{
    if ( a == NULL ) return;   // nothing to release
    free( a[0] );              // contiguous block of matrix entries
                               // (free(NULL) is a harmless no-op)
    free( a );                 // array of row pointers
}
//=============================================================================
int project_readOption(char* s1, char* s2)
//
//  Input:   s1 = option keyword
//           s2 = string representation of option's value
//  Output:  returns error code (0 if successful)
//  Purpose: reads a project option from a pair of string tokens.
//
//  NOTE: all project options have default values assigned in setDefaults().
//
{
    int      k, m, h, s;
    double   tStep;
    char     strDate[25];
    DateTime aTime;
    DateTime aDate;

    // --- determine which option is being read
    k = findmatch(s1, OptionWords);
    if ( k < 0 ) return error_setInpError(ERR_KEYWORD, s1);
    switch ( k )
    {
      // --- choice of flow units
      case FLOW_UNITS:
        m = findmatch(s2, FlowUnitWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        FlowUnits = m;
        // flow-unit choice implies the unit system (CFS/GPM/MGD are US)
        if ( FlowUnits <= MGD ) UnitSystem = US;
        else                    UnitSystem = SI;
        break;

      // --- choice of infiltration modeling method
      case INFIL_MODEL:
        m = findmatch(s2, InfilModelWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        InfilModel = m;
        break;

      // --- choice of flow routing method
      case ROUTE_MODEL:
        m = findmatch(s2, RouteModelWords);
        if ( m < 0 ) m = findmatch(s2, OldRouteModelWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        if ( m == NO_ROUTING ) IgnoreRouting = TRUE;
        else RouteModel = m;
        // legacy "extended kinematic wave" maps onto kinematic wave
        if ( RouteModel == EKW ) RouteModel = KW;
        break;

      // --- simulation start date
      case START_DATE:
        if ( !datetime_strToDate(s2, &StartDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- simulation start time of day
      case START_TIME:
        if ( !datetime_strToTime(s2, &StartTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- simulation ending date
      case END_DATE:
        if ( !datetime_strToDate(s2, &EndDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- simulation ending time of day
      case END_TIME:
        if ( !datetime_strToTime(s2, &EndTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- reporting start date
      case REPORT_START_DATE:
        if ( !datetime_strToDate(s2, &ReportStartDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- reporting start time of day
      case REPORT_START_TIME:
        if ( !datetime_strToTime(s2, &ReportStartTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- day of year when street sweeping begins or when it ends
      //     (year is arbitrarily set to 1947 so that the dayOfYear
      //      function can be applied)
      case SWEEP_START:
      case SWEEP_END:
        // strDate[25] is ample for a "month/day" token plus "/1947"
        strcpy(strDate, s2);
        strcat(strDate, "/1947");
        if ( !datetime_strToDate(strDate, &aDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        m = datetime_dayOfYear(aDate);
        if ( k == SWEEP_START ) SweepStart = m;
        else SweepEnd = m;
        break;

      // --- number of antecedent dry days
      case START_DRY_DAYS:
        StartDryDays = atof(s2);
        if ( StartDryDays < 0.0 )
        {
            return error_setInpError(ERR_NUMBER, s2);
        }
        break;

      // --- runoff or reporting time steps
      //     (input is in hrs:min:sec format, time step saved as seconds)
      case WET_STEP:
      case DRY_STEP:
      case REPORT_STEP:
        if ( !datetime_strToTime(s2, &aTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        datetime_decodeTime(aTime, &h, &m, &s);
        // whole part of aTime holds any full days; fold them into hours
        h += 24*(int)aTime;
        // s is reused as the total step length in seconds
        s = s + 60*m + 3600*h;
        if ( s <= 0 ) return error_setInpError(ERR_NUMBER, s2);
        switch ( k )
        {
          case WET_STEP:     WetStep = s;     break;
          case DRY_STEP:     DryStep = s;     break;
          case REPORT_STEP:  ReportStep = s;  break;
        }
        break;

      // --- type of damping applied to inertial terms of dynamic wave routing
      case INERT_DAMPING:
        m = findmatch(s2, InertDampingWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        else InertDamping = m;
        break;

      // --- Yes/No options (NO = 0, YES = 1)
      case ALLOW_PONDING:
      case SLOPE_WEIGHTING:
      case SKIP_STEADY_STATE:
      case IGNORE_RAINFALL:
      case IGNORE_SNOWMELT:
      case IGNORE_GWATER:
      case IGNORE_ROUTING:
      case IGNORE_QUALITY:
      case IGNORE_RDII:                                                        //(5.1.004)
        m = findmatch(s2, NoYesWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        switch ( k )
        {
          case ALLOW_PONDING:     AllowPonding    = m;  break;
          case SLOPE_WEIGHTING:   SlopeWeighting  = m;  break;
          case SKIP_STEADY_STATE: SkipSteadyState = m;  break;
          case IGNORE_RAINFALL:   IgnoreRainfall  = m;  break;
          case IGNORE_SNOWMELT:   IgnoreSnowmelt  = m;  break;
          case IGNORE_GWATER:     IgnoreGwater    = m;  break;
          case IGNORE_ROUTING:    IgnoreRouting   = m;  break;
          case IGNORE_QUALITY:    IgnoreQuality   = m;  break;
          case IGNORE_RDII:       IgnoreRDII      = m;  break;                 //(5.1.004)
        }
        break;

      case NORMAL_FLOW_LTD:
        m = findmatch(s2, NormalFlowWords);
        //if ( m < 0 ) m = findmatch(s2, NoYesWords);  DEPRECATED             //(5.1.012)
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        NormalFlowLtd = m;
        break;

      case FORCE_MAIN_EQN:
        m = findmatch(s2, ForceMainEqnWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        ForceMainEqn = m;
        break;

      case LINK_OFFSETS:
        m = findmatch(s2, LinkOffsetWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        LinkOffsets = m;
        break;

      // --- compatibility option for selecting solution method for
      //     dynamic wave flow routing (NOT CURRENTLY USED)
      case COMPATIBILITY:
        if      ( strcomp(s2, "3") ) Compatibility = SWMM3;
        else if ( strcomp(s2, "4") ) Compatibility = SWMM4;
        else if ( strcomp(s2, "5") ) Compatibility = SWMM5;
        else return error_setInpError(ERR_KEYWORD, s2);
        break;

      // --- routing or lengthening time step (in decimal seconds)
      //     (lengthening time step is used in Courant stability formula
      //     to artificially lengthen conduits for dynamic wave flow routing
      //     (a value of 0 means that no lengthening is used))
      case ROUTE_STEP:
      case LENGTHENING_STEP:
        // value may be given either as a decimal number of seconds or
        // in hrs:min:sec format
        if ( !getDouble(s2, &tStep) )
        {
            if ( !datetime_strToTime(s2, &aTime) )
            {
                return error_setInpError(ERR_NUMBER, s2);
            }
            else
            {
                datetime_decodeTime(aTime, &h, &m, &s);
                h += 24*(int)aTime;
                s = s + 60*m + 3600*h;
                tStep = s;
            }
        }
        if ( k == ROUTE_STEP )
        {
            if ( tStep <= 0.0 ) return error_setInpError(ERR_NUMBER, s2);
            RouteStep = tStep;
        }
        else LengtheningStep = MAX(0.0, tStep);
        break;

////  Following code section added to release 5.1.008.  ////                   //(5.1.008)

      // --- minimum variable time step for dynamic wave routing
      case MIN_ROUTE_STEP:
        if ( !getDouble(s2, &MinRouteStep) || MinRouteStep < 0.0 )
            return error_setInpError(ERR_NUMBER, s2);
        break;

      case NUM_THREADS:
        m = atoi(s2);
        if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2);
        NumThreads = m;
        break;
////

      // --- safety factor applied to variable time step estimates under
      //     dynamic wave flow routing (value of 0 indicates that variable
      //     time step option not used)
      case VARIABLE_STEP:
        if ( !getDouble(s2, &CourantFactor) )
            return error_setInpError(ERR_NUMBER, s2);
        if ( CourantFactor < 0.0 || CourantFactor > 2.0 )
            return error_setInpError(ERR_NUMBER, s2);
        break;

      // --- minimum surface area (ft2 or sq. meters) associated with nodes
      //     under dynamic wave flow routing
      case MIN_SURFAREA:
        MinSurfArea = atof(s2);
        break;

      // --- minimum conduit slope (%)
      case MIN_SLOPE:
        if ( !getDouble(s2, &MinSlope) )
            return error_setInpError(ERR_NUMBER, s2);
        if ( MinSlope < 0.0 || MinSlope >= 100 )
            return error_setInpError(ERR_NUMBER, s2);
        // stored internally as a fraction, not a percentage
        MinSlope /= 100.0;
        break;

      // --- maximum trials / time step for dynamic wave routing
      case MAX_TRIALS:
        m = atoi(s2);
        if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2);
        MaxTrials = m;
        break;

      // --- head convergence tolerance for dynamic wave routing
      case HEAD_TOL:
        if ( !getDouble(s2, &HeadTol) )
        {
            return error_setInpError(ERR_NUMBER, s2);
        }
        break;

      // --- steady state tolerance on system inflow - outflow
      case SYS_FLOW_TOL:
        if ( !getDouble(s2, &SysFlowTol) )
        {
            return error_setInpError(ERR_NUMBER, s2);
        }
        // entered as a percentage; stored as a fraction
        SysFlowTol /= 100.0;
        break;

      // --- steady state tolerance on nodal lateral inflow
      case LAT_FLOW_TOL:
        if ( !getDouble(s2, &LatFlowTol) )
        {
            return error_setInpError(ERR_NUMBER, s2);
        }
        // entered as a percentage; stored as a fraction
        LatFlowTol /= 100.0;
        break;

      case TEMPDIR: // Temporary Directory
        sstrncpy(TempDir, s2, MAXFNAME);
        break;

    }
    return 0;
}
//=============================================================================
void initPointers()
//
//  Input:   none
//  Output:  none
//  Purpose: assigns NULL to all dynamic arrays for a new project,
//           so that deleteObjects() can safely free only what was
//           actually allocated.
//
{
    Gage     = NULL;
    Subcatch = NULL;
    Node     = NULL;
    Outfall  = NULL;
    Divider  = NULL;
    Storage  = NULL;
    Link     = NULL;
    Conduit  = NULL;
    Pump     = NULL;
    Orifice  = NULL;
    Weir     = NULL;
    Outlet   = NULL;
    Pollut   = NULL;
    Landuse  = NULL;
    Pattern  = NULL;
    Curve    = NULL;
    Tseries  = NULL;
    Transect = NULL;
    Shape    = NULL;
    Aquifer    = NULL;
    UnitHyd    = NULL;
    Snowmelt   = NULL;
    Event      = NULL;                                                         //(5.1.011)
    // hash-table memory pool has not been created yet
    MemPoolAllocated = FALSE;
}
//=============================================================================
void setDefaults()
//
//  Input:   none
//  Output:  none
//  Purpose: assigns default values to project variables.
//           (These are the values later overridden by project_readOption()
//           as the input file is processed.)
//
{
   int i, j;

   // Project title & temp. file path
   for (i = 0; i < MAXTITLE; i++) strcpy(Title[i], "");
   strcpy(TempDir, "");

   // Interface files
   Frain.mode      = SCRATCH_FILE;     // Use scratch rainfall file
   Fclimate.mode   = NO_FILE;
   Frunoff.mode    = NO_FILE;
   Frdii.mode      = NO_FILE;
   Fhotstart1.mode = NO_FILE;
   Fhotstart2.mode = NO_FILE;
   Finflows.mode   = NO_FILE;
   Foutflows.mode  = NO_FILE;
   Frain.file      = NULL;
   Fclimate.file   = NULL;
   Frunoff.file    = NULL;
   Frdii.file      = NULL;
   Fhotstart1.file = NULL;
   Fhotstart2.file = NULL;
   Finflows.file   = NULL;
   Foutflows.file  = NULL;
   Fout.file       = NULL;
   Fout.mode       = NO_FILE;

   // Analysis options
   UnitSystem      = US;               // US unit system
   FlowUnits       = CFS;              // CFS flow units
   InfilModel      = HORTON;           // Horton infiltration method
   RouteModel      = KW;               // Kin. wave flow routing method
   AllowPonding    = FALSE;            // No ponding at nodes
   InertDamping    = SOME;             // Partial inertial damping
   NormalFlowLtd   = BOTH;             // Default normal flow limitation
   ForceMainEqn    = H_W;              // Hazen-Williams eqn. for force mains
   LinkOffsets     = DEPTH_OFFSET;     // Use depth for link offsets
   LengtheningStep = 0;                // No lengthening of conduits
   CourantFactor   = 0.0;              // No variable time step
   MinSurfArea     = 0.0;              // Force use of default min. surface area
   MinSlope        = 0.0;              // No user supplied minimum conduit slope //(5.1.012)
   SkipSteadyState = FALSE;            // Do flow routing in steady state periods
   IgnoreRainfall  = FALSE;            // Analyze rainfall/runoff
   IgnoreRDII      = FALSE;            // Analyze RDII                         //(5.1.004)
   IgnoreSnowmelt  = FALSE;            // Analyze snowmelt
   IgnoreGwater    = FALSE;            // Analyze groundwater
   IgnoreRouting   = FALSE;            // Analyze flow routing
   IgnoreQuality   = FALSE;            // Analyze water quality
   WetStep         = 300;              // Runoff wet time step (secs)
   DryStep         = 3600;             // Runoff dry time step (secs)
   RouteStep       = 300.0;            // Routing time step (secs)
   MinRouteStep    = 0.5;              // Minimum variable time step (sec)     //(5.1.008)
   ReportStep      = 900;              // Reporting time step (secs)
   StartDryDays    = 0.0;              // Antecedent dry days
   MaxTrials       = 0;                // Force use of default max. trials
   HeadTol         = 0.0;              // Force use of default head tolerance
   SysFlowTol      = 0.05;             // System flow tolerance for steady state
   LatFlowTol      = 0.05;             // Lateral flow tolerance for steady state
   NumThreads      = 0;                // Number of parallel threads to use
   NumEvents       = 0;                // Number of detailed routing events    //(5.1.011)

   // Deprecated options
   SlopeWeighting  = TRUE;             // Use slope weighting
   Compatibility   = SWMM4;            // Use SWMM 4 up/dn weighting method

   // Starting & ending date/time
   StartDate       = datetime_encodeDate(2004, 1, 1);
   StartTime       = datetime_encodeTime(0,0,0);
   StartDateTime   = StartDate + StartTime;
   EndDate         = StartDate;
   EndTime         = 0.0;
   ReportStartDate = NO_DATE;
   ReportStartTime = NO_DATE;
   SweepStart      = 1;                // Street sweeping: all year by default
   SweepEnd        = 365;

   // Reporting options
   RptFlags.input         = FALSE;
   RptFlags.continuity    = TRUE;
   RptFlags.flowStats     = TRUE;
   RptFlags.controls      = FALSE;
   RptFlags.subcatchments = FALSE;
   RptFlags.nodes         = FALSE;
   RptFlags.links         = FALSE;
   RptFlags.nodeStats     = FALSE;

   // Temperature data
   Temp.dataSource  = NO_TEMP;
   Temp.tSeries     = -1;              // -1 == no time series assigned
   Temp.ta          = 70.0;
   Temp.elev        = 0.0;
   Temp.anglat      = 40.0;
   Temp.dtlong      = 0.0;
   Temp.tmax        = MISSING;

   // Wind speed data
   Wind.type = MONTHLY_WIND;
   for ( i=0; i<12; i++ ) Wind.aws[i] = 0.0;

   // Snowmelt parameters
   Snow.snotmp      = 34.0;
   Snow.tipm        = 0.5;
   Snow.rnm         = 0.6;

   // Snow areal depletion curves for pervious and impervious surfaces
   for ( i=0; i<2; i++ )
   {
       for ( j=0; j<10; j++) Snow.adc[i][j] = 1.0;
   }

   // Evaporation rates
   Evap.type = CONSTANT_EVAP;
   for (i=0; i<12; i++)
   {
       Evap.monthlyEvap[i] = 0.0;
       Evap.panCoeff[i]    = 1.0;
   }
   Evap.recoveryPattern = -1;          // -1 == no pattern assigned
   Evap.recoveryFactor  = 1.0;
   Evap.tSeries = -1;
   Evap.dryOnly = FALSE;

////  Following code segment added to release 5.1.007.  ////                   //(5.1.007)
////
   // Climate adjustments
   for (i = 0; i < 12; i++)
   {
       Adjust.temp[i] = 0.0;       // additive adjustments
       Adjust.evap[i] = 0.0;       // additive adjustments
       Adjust.rain[i] = 1.0;       // multiplicative adjustments
       Adjust.hydcon[i] = 1.0;     // hyd. conductivity adjustments           //(5.1.008)
   }
   Adjust.rainFactor = 1.0;
   Adjust.hydconFactor = 1.0;                                                 //(5.1.008)
////
}
//=============================================================================
void openFiles(char *f1, char *f2, char *f3)
//
// Input: f1 = name of input file
// f2 = name of report file
// f3 = name of binary output file
// Output: none
// Purpose: opens a project's input and report files.
//
// On any failure the global ErrorCode is set and the function returns
// immediately, leaving any already-opened file for the caller to close.
// NOTE: the binary output file (f3) is NOT opened here -- only its name is
// saved into Fout.name; it is opened elsewhere.
//
{
// --- initialize file pointers to NULL
Finp.file = NULL;
Frpt.file = NULL;
Fout.file = NULL;
// --- save file names
sstrncpy(Finp.name, f1, MAXFNAME);
sstrncpy(Frpt.name, f2, MAXFNAME);
sstrncpy(Fout.name, f3, MAXFNAME);
// --- check that file names are not identical
// (strcomp is the project's string-compare helper)
if (strcomp(f1, f2) || strcomp(f1, f3) || strcomp(f2, f3))
{
writecon(FMT11);
ErrorCode = ERR_FILE_NAME;
return;
}
// --- open input and report files
// input is opened read-only text ("rt"), report write text ("wt")
if ((Finp.file = fopen(f1,"rt")) == NULL)
{
writecon(FMT12);
writecon(f1);
ErrorCode = ERR_INP_FILE;
return;
}
if ((Frpt.file = fopen(f2,"wt")) == NULL)
{
writecon(FMT13);
ErrorCode = ERR_RPT_FILE;
return;
}
}
//=============================================================================
void createObjects()
//
// Input: none
// Output: none
// Purpose: allocates memory for project's objects.
//
// NOTE: number of each type of object has already been determined in
// project_readInput().
//
// NOTE(review): the calloc results below are not checked for NULL here;
// presumably allocation failures surface downstream -- TODO confirm.
//
{
int j, k;
// --- allocate memory for each category of object
if ( ErrorCode ) return;
Gage = (TGage *) calloc(Nobjects[GAGE], sizeof(TGage));
Subcatch = (TSubcatch *) calloc(Nobjects[SUBCATCH], sizeof(TSubcatch));
Node = (TNode *) calloc(Nobjects[NODE], sizeof(TNode));
Outfall = (TOutfall *) calloc(Nnodes[OUTFALL], sizeof(TOutfall));
Divider = (TDivider *) calloc(Nnodes[DIVIDER], sizeof(TDivider));
Storage = (TStorage *) calloc(Nnodes[STORAGE], sizeof(TStorage));
Link = (TLink *) calloc(Nobjects[LINK], sizeof(TLink));
Conduit = (TConduit *) calloc(Nlinks[CONDUIT], sizeof(TConduit));
Pump = (TPump *) calloc(Nlinks[PUMP], sizeof(TPump));
Orifice = (TOrifice *) calloc(Nlinks[ORIFICE], sizeof(TOrifice));
Weir = (TWeir *) calloc(Nlinks[WEIR], sizeof(TWeir));
Outlet = (TOutlet *) calloc(Nlinks[OUTLET], sizeof(TOutlet));
Pollut = (TPollut *) calloc(Nobjects[POLLUT], sizeof(TPollut));
Landuse = (TLanduse *) calloc(Nobjects[LANDUSE], sizeof(TLanduse));
Pattern = (TPattern *) calloc(Nobjects[TIMEPATTERN], sizeof(TPattern));
Curve = (TTable *) calloc(Nobjects[CURVE], sizeof(TTable));
Tseries = (TTable *) calloc(Nobjects[TSERIES], sizeof(TTable));
Aquifer = (TAquifer *) calloc(Nobjects[AQUIFER], sizeof(TAquifer));
UnitHyd = (TUnitHyd *) calloc(Nobjects[UNITHYD], sizeof(TUnitHyd));
Snowmelt = (TSnowmelt *) calloc(Nobjects[SNOWMELT], sizeof(TSnowmelt));
Shape = (TShape *) calloc(Nobjects[SHAPE], sizeof(TShape));
//// Added to release 5.1.011. //// //(5.1.011)
// --- create array of detailed routing event periods
// one extra slot holds a sentinel event starting at BIG (i.e. past any
// simulation date) so event scans terminate
Event = (TEvent *) calloc(NumEvents+1, sizeof(TEvent));
Event[NumEvents].start = BIG;
Event[NumEvents].end = BIG + 1.0;
////
// --- create LID objects
lid_create(Nobjects[LID], Nobjects[SUBCATCH]);
// --- create control rules
ErrorCode = controls_create(Nobjects[CONTROL]);
if ( ErrorCode ) return;
// --- create cross section transects
ErrorCode = transect_create(Nobjects[TRANSECT]);
if ( ErrorCode ) return;
// --- allocate memory for infiltration data
infil_create(Nobjects[SUBCATCH], InfilModel);
// --- allocate memory for water quality state variables
// (seven per-pollutant arrays for each subcatchment; deleteObjects()
// must free every one of them)
for (j = 0; j < Nobjects[SUBCATCH]; j++)
{
Subcatch[j].initBuildup =
(double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].pondedQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].concPonded = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Subcatch[j].surfaceBuildup = (double *) calloc(Nobjects[POLLUT], sizeof(double));
}
for (j = 0; j < Nobjects[NODE]; j++)
{
Node[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Node[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Node[j].extInflow = NULL;
Node[j].dwfInflow = NULL;
Node[j].rdiiInflow = NULL;
Node[j].treatment = NULL;
}
for (j = 0; j < Nobjects[LINK]; j++)
{
Link[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Link[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
Link[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double));
}
// --- allocate memory for land use buildup/washoff functions
for (j = 0; j < Nobjects[LANDUSE]; j++)
{
Landuse[j].buildupFunc =
(TBuildup *) calloc(Nobjects[POLLUT], sizeof(TBuildup));
Landuse[j].washoffFunc =
(TWashoff *) calloc(Nobjects[POLLUT], sizeof(TWashoff));
}
// --- allocate memory for subcatchment landuse factors
for (j = 0; j < Nobjects[SUBCATCH]; j++)
{
Subcatch[j].landFactor =
(TLandFactor *) calloc(Nobjects[LANDUSE], sizeof(TLandFactor));
for (k = 0; k < Nobjects[LANDUSE]; k++)
{
Subcatch[j].landFactor[k].buildup =
(double *) calloc(Nobjects[POLLUT], sizeof(double));
}
}
// --- initialize buildup & washoff functions
for (j = 0; j < Nobjects[LANDUSE]; j++)
{
for (k = 0; k < Nobjects[POLLUT]; k++)
{
Landuse[j].buildupFunc[k].funcType = NO_BUILDUP;
Landuse[j].buildupFunc[k].normalizer = PER_AREA;
Landuse[j].washoffFunc[k].funcType = NO_WASHOFF;
}
}
// --- initialize rain gage properties
for (j = 0; j < Nobjects[GAGE]; j++)
{
Gage[j].tSeries = -1;
strcpy(Gage[j].fname, "");
}
// --- initialize subcatchment properties
// (-1 index values mean "not assigned")
for (j = 0; j < Nobjects[SUBCATCH]; j++)
{
Subcatch[j].outSubcatch = -1;
Subcatch[j].outNode = -1;
Subcatch[j].infil = -1;
Subcatch[j].groundwater = NULL;
Subcatch[j].gwLatFlowExpr = NULL; //(5.1.007)
Subcatch[j].gwDeepFlowExpr = NULL; //(5.1.007)
Subcatch[j].snowpack = NULL;
Subcatch[j].lidArea = 0.0;
for (k = 0; k < Nobjects[POLLUT]; k++)
{
Subcatch[j].initBuildup[k] = 0.0;
}
}
// --- initialize RDII unit hydrograph properties
for ( j = 0; j < Nobjects[UNITHYD]; j++ ) rdii_initUnitHyd(j);
// --- initialize snowmelt properties
for ( j = 0; j < Nobjects[SNOWMELT]; j++ ) snow_initSnowmelt(j);
// --- initialize storage node exfiltration //(5.1.007)
for (j = 0; j < Nnodes[STORAGE]; j++) Storage[j].exfil = NULL; //(5.1.007)
// --- initialize link properties
for (j = 0; j < Nobjects[LINK]; j++)
{
Link[j].xsect.type = -1;
Link[j].cLossInlet = 0.0;
Link[j].cLossOutlet = 0.0;
Link[j].cLossAvg = 0.0;
Link[j].hasFlapGate = FALSE;
}
for (j = 0; j < Nlinks[PUMP]; j++) Pump[j].pumpCurve = -1;
// --- initialize reporting flags
for (j = 0; j < Nobjects[SUBCATCH]; j++) Subcatch[j].rptFlag = FALSE;
for (j = 0; j < Nobjects[NODE]; j++) Node[j].rptFlag = FALSE;
for (j = 0; j < Nobjects[LINK]; j++) Link[j].rptFlag = FALSE;
// --- initialize curves, time series, and time patterns
for (j = 0; j < Nobjects[CURVE]; j++) table_init(&Curve[j]);
for (j = 0; j < Nobjects[TSERIES]; j++) table_init(&Tseries[j]);
for (j = 0; j < Nobjects[TIMEPATTERN]; j++) inflow_initDwfPattern(j);
}
//=============================================================================
void deleteObjects()
//
// Input:   none
// Output:  none
// Purpose: frees memory allocated for a project's objects.
//
// NOTE: care is taken to first free objects that are properties of another
//       object before the latter is freed (e.g., we must free a
//       subcatchment's land use factors before freeing the subcatchment).
//
{
    int j, k;

    // --- free memory for landuse factors & groundwater
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        for (k = 0; k < Nobjects[LANDUSE]; k++)
        {
            FREE(Subcatch[j].landFactor[k].buildup);
        }
        FREE(Subcatch[j].landFactor);
        FREE(Subcatch[j].groundwater);
        gwater_deleteFlowExpression(j);
        FREE(Subcatch[j].snowpack);
    }

    // --- free memory for buildup/washoff functions
    if ( Landuse ) for (j = 0; j < Nobjects[LANDUSE]; j++)
    {
        FREE(Landuse[j].buildupFunc);
        FREE(Landuse[j].washoffFunc);    // BUG FIX: was missing its ';'
    }

    // --- free memory for water quality state variables
    //     BUG FIX: concPonded and surfaceBuildup are allocated in
    //     createObjects() but were never freed here (memory leak of
    //     Nobjects[POLLUT] doubles per subcatchment for each)
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        FREE(Subcatch[j].initBuildup);
        FREE(Subcatch[j].oldQual);
        FREE(Subcatch[j].newQual);
        FREE(Subcatch[j].pondedQual);
        FREE(Subcatch[j].concPonded);
        FREE(Subcatch[j].totalLoad);
        FREE(Subcatch[j].surfaceBuildup);
    }
    if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
    {
        FREE(Node[j].oldQual);
        FREE(Node[j].newQual);
    }
    if ( Link ) for (j = 0; j < Nobjects[LINK]; j++)
    {
        FREE(Link[j].oldQual);
        FREE(Link[j].newQual);
        FREE(Link[j].totalLoad);
    }

    // --- free memory used for rainfall infiltration
    infil_delete();

    //// Added for release 5.1.007. ////                               //(5.1.007)
    ////
    // --- free memory used for storage exfiltration
    if ( Node ) for (j = 0; j < Nnodes[STORAGE]; j++)
    {
        if ( Storage[j].exfil )
        {
            FREE(Storage[j].exfil->btmExfil);
            FREE(Storage[j].exfil->bankExfil);
            FREE(Storage[j].exfil);
        }
    }
    ////

    // --- free memory used for outfall pollutants loads              //(5.1.008)
    if ( Node ) for (j = 0; j < Nnodes[OUTFALL]; j++)                 //(5.1.008)
        FREE(Outfall[j].wRouted);                                     //(5.1.008)

    // --- free memory used for nodal inflows & treatment functions
    if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
    {
        inflow_deleteExtInflows(j);
        inflow_deleteDwfInflows(j);
        rdii_deleteRdiiInflow(j);
        treatmnt_delete(j);
    }

    // --- delete table entries for curves and time series
    if ( Tseries ) for (j = 0; j < Nobjects[TSERIES]; j++)
        table_deleteEntries(&Tseries[j]);
    if ( Curve ) for (j = 0; j < Nobjects[CURVE]; j++)
        table_deleteEntries(&Curve[j]);

    // --- delete cross section transects
    transect_delete();

    // --- delete control rules
    controls_delete();

    // --- delete LIDs
    lid_delete();

    // --- now free each major category of object
    FREE(Gage);
    FREE(Subcatch);
    FREE(Node);
    FREE(Outfall);
    FREE(Divider);
    FREE(Storage);
    FREE(Link);
    FREE(Conduit);
    FREE(Pump);
    FREE(Orifice);
    FREE(Weir);
    FREE(Outlet);
    FREE(Pollut);
    FREE(Landuse);
    FREE(Pattern);
    FREE(Curve);
    FREE(Tseries);
    FREE(Aquifer);
    FREE(UnitHyd);
    FREE(Snowmelt);
    FREE(Shape);
    FREE(Event);                                                      //(5.1.011)
}
//=============================================================================
void createHashTables()
//
// Input: none
// Output: returns error code
// Purpose: allocates memory for object ID hash tables
//
{ int j;
MemPoolAllocated = FALSE;
for (j = 0; j < MAX_OBJ_TYPES ; j++)
{
Htable[j] = HTcreate();
if ( Htable[j] == NULL ) report_writeErrorMsg(ERR_MEMORY, "");
}
// --- initialize memory pool used to store object ID's
if ( AllocInit() == NULL ) report_writeErrorMsg(ERR_MEMORY, "");
else MemPoolAllocated = TRUE;
}
//=============================================================================
void deleteHashTables()
//
// Input:   none
// Output:  none
// Purpose: frees memory allocated for object ID hash tables.
//
{
    int type;

    // --- dispose of each object category's hash table, if created
    for (type = 0; type < MAX_OBJ_TYPES; type++)
    {
        if ( Htable[type] ) HTfree(Htable[type]);
    }

    // --- release the object ID memory pool
    if ( MemPoolAllocated ) AllocFreePool();
}
//=============================================================================
|
GB_binop__isge_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_uint32
// A.*B function (eWiseMult): GB_AemultB__isge_uint32
// A*D function (colscale): GB_AxD__isge_uint32
// D*A function (rowscale): GB_DxB__isge_uint32
// C+=B function (dense accum): GB_Cdense_accumB__isge_uint32
// C+=b function (dense accum): GB_Cdense_accumb__isge_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_uint32
// C=scalar+B GB_bind1st__isge_uint32
// C=scalar+B' GB_bind1st_tran__isge_uint32
// C=A+scalar GB_bind2nd__isge_uint32
// C=A'+scalar GB_bind2nd_tran__isge_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT32 || GxB_NO_ISGE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated kernel for the ISGE_UINT32 operator; the loop body comes from
// the shared template below, specialized via the GB_* macros in this file.
GrB_Info GB_Cdense_ewise3_noaccum__isge_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The kfirst/klast/pstart slice arrays partition B's entries across ntasks
// tasks; the accumulation loop itself lives in the included template.
GrB_Info GB_Cdense_accumB__isge_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__isge_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the return inside the braces above always
// fires first; harmless artifact of the code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__isge_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array, written by the colscale template below
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__isge_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array, written by the rowscale template below
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// C_to_M/C_to_A/C_to_B map C's vectors back to M, A, and B; TaskList carries
// the parallel work partition for the add template.
GrB_Info GB_AaddB__isge_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Same task-based structure as eWiseAdd above, but on the intersection of
// the patterns of A and B (via the emult template).
GrB_Info GB_AemultB__isge_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__isge_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
// embarrassingly parallel: each entry is independent
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t bij = Bx [p] ;
// ISGE: the comparison result (0 or 1) is stored as a uint32_t
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__isge_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
// embarrassingly parallel: each entry is independent
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
// ISGE: the comparison result (0 or 1) is stored as a uint32_t
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB_bind1st_tran__isge_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
// GB_ATYPE is therefore redefined around the template include and
// restored (to the same uint32_t here) after it.
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB_bind2nd_tran__isge_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// here A is the 1st operand, so no GB_ATYPE redefinition is needed
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dot_product_tiled.c | /*
* OpenMP implementation of dot product calculation.
* This program is used as the driving example in demos in the module Heterogeneous Programming with OpenMP
*
* @author Apan Qasem
*/
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include <omp.h>
#define REPS 100
double t0;
double mysecond() {
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
int main(int argc, char *argv[]) {
int M = atoi(argv[1]); // size of vectors
int N = atoi(argv[2]); // number of OpenMP threads
float*a, *b;
a = (float*) malloc(sizeof(float) * M);
b = (float*) malloc(sizeof(float) * M);
int i, j, k;
for (i = 0; i < M; i++) {
a[i] = i;
b[i] = i + 3;
}
omp_set_num_threads(N);
float sum = 0;
t0 = mysecond();
for (k = 0; k < M; k = k + 1000) {
for (j = 0; j < 100; j++) {
#pragma omp parallel for reduction(+:sum) schedule(static, 1024)
for (i = k; i < (k + 1000); i++)
sum += a[i] * b[i];
}
}
t0 = (mysecond() - t0) * 1.e3;
fprintf(stdout, "result = %1.3e\n", sum);
fprintf(stdout, "parallel loop = %3.2f ms\n", t0);
return 0;
}
|
GrB_IndexUnaryOp_wait.c | //------------------------------------------------------------------------------
// GrB_IndexUnaryOp_wait: wait for a user-defined GrB_IndexUnaryOp to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GrB_IndexUnaryOp has no pending
// operations to wait for. All this method does is verify that the op is
// properly initialized, and then it does an OpenMP flush.
#include "GB.h"
GrB_Info GrB_IndexUnaryOp_wait // no work, just check if valid
(
// the signature changed at v6: v5 and earlier take a pointer to the op,
// v6+ take the op by value plus a wait mode
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
GrB_IndexUnaryOp *op
#else
GrB_IndexUnaryOp op,
GrB_WaitMode waitmode
#endif
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
GB_WHERE1 ("GrB_IndexUnaryOp_wait (&op)") ;
GB_RETURN_IF_NULL (op) ;
GB_RETURN_IF_NULL_OR_FAULTY (*op) ;
#else
GB_WHERE1 ("GrB_IndexUnaryOp_wait (op, waitmode)") ;
GB_RETURN_IF_NULL_OR_FAULTY (op) ;
#endif
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
// no pending work for a user-defined op: just flush and report success
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
TRPOCpuCode.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "omp.h"
#include "TRPO.h"
#include "Maxfiles.h"
#include "MaxSLiCInterface.h"
// Utility function calculating the number of trainable parameters of a
// fully-connected network with a diagonal-Gaussian head: for each adjacent
// layer pair a weight matrix plus a bias vector, plus one std parameter per
// output dimension.
size_t NumParamsCalc (size_t * LayerSize, size_t NumLayers) {
    // Guard the degenerate case: with no layers there are no parameters.
    // (Without this, NumLayers-1 wraps around on the unsigned type below,
    // causing a runaway loop and an out-of-bounds LayerSize read.)
    if (NumLayers == 0) return 0;

    size_t NumParams = 0;
    for (size_t i = 0; i < NumLayers - 1; ++i) {
        // Weight and Bias
        NumParams += LayerSize[i] * LayerSize[i+1] + LayerSize[i+1];
    }
    // Std
    NumParams += LayerSize[NumLayers-1];
    return NumParams;
}
// Utility function maintaining a running absolute maximum:
// returns the larger of `record` and |cur|.
static inline double max(double record, double cur) {
    double mag = fabs(cur);
    return (mag > record) ? mag : record;
}
double FVP (TRPOparam param, double *Result, double *Input)
{
//////////////////// Remarks ////////////////////
// This function computes the Fisher-Vector Product using Pearlmutter Algorithm
// Input: the vector to be multiplied with the Fisher Information Matrix
// Result: the Fisher-Vector Product
// Remarks: The length of Input and Result must be the number of all trainable parameters in the network
// Step1: ordinary forward propagation
// Step2: ordinary backward propagation
// Step3: Pearlmutter forward propagation
// Step4: Pearlmutter backward propagation
//////////////////// Read Parameters ////////////////////
// Assign Parameters
const size_t NumLayers = param.NumLayers;
char * AcFunc = param.AcFunc;
size_t * LayerSize = param.LayerSize;
const size_t NumSamples = param.NumSamples;
char * ModelFile = param.ModelFile;
char * DataFile = param.DataFile;
const double CG_Damping = param.CG_Damping;
// Dimension of Observation Space
const size_t ObservSpaceDim = LayerSize[0];
// Dimension of Action Space
const size_t ActionSpaceDim = LayerSize[NumLayers-1];
// iterator when traversing through input vector and result vector
size_t pos;
//////////////////// Memory Allocation - Neural Network ////////////////////
// W[i]: Weight Matrix from Layer[i] to Layer[i+1]
// B[i]: Bias Vector from Layer[i] to Layer[i+1]
// Item (j,k) in W[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
// Item B[k] is the bias of Neuron #k in Layer[i+1]
double * W [NumLayers-1];
double * B [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
W[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
B[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// LogStd[i]: log standard deviation for action dimension #i in the Diagonal Gaussian Distribution
double * LogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Input Vector ////////////////////
// The Input Vector is to be multiplied with the Hessian Matrix of KL to derive the Fisher Vector Product
// There is one-to-one correspondence between the input vector and all trainable parameters in the neural network
// As a result, the shape of the Input Vector is the same as that of the parameters in the model
// The only difference is that the Input Vector is stored in a flattened manner
// There is one-to-one correspondence between: VW[i] and W[i], VB[i] and B[i], VStd[i] and Std[i]
double * VW [NumLayers-1];
double * VB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
VW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
VB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// Allocate Memory for Input Vector corresponding to LogStd
double * VLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Simulation Data ////////////////////
// Allocate Memory for Observation and Probability Mean
// Observ: list of observations - corresponds to ob_no in modular_rl
// Mean: list of probablity mean values - corresponds to the 'mean' part of prob_np in modular_rl
// Remarks: due to the specific setting of the experienments in the TRPO paper,
// Std is the same for all samples in each simulation iteration,
// so we just allocate Std memory space for one sample and use it for all samples.
// The general case should be another vector of Std with size NumSamples*ActionSpaceDim
double * Observ = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
double * Mean = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Std = (double *) calloc(ActionSpaceDim, sizeof(double));
// Allocate Memory for Average Sample Mean and Average Sample Mean Square
// Remarks: These values are statistics calculated from the samples, to be used in the algorithm
double * AvgSampleMean = (double *) calloc(ActionSpaceDim, sizeof(double));
double * AvgSampleMeanSq = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Ordinary Forward and Backward Propagation ////////////////////
// Layer[i] : Memory of each layer's outputs, i.e. y_i
// GLayer[I]: Gradient of KL w.r.t. the pre-activation values in Layer[i], i.e. d(KL)/d(x_i)
double * Layer [NumLayers];
double * GLayer [NumLayers];
for (size_t i=0; i<NumLayers; ++i) {
Layer[i] = (double *) calloc(LayerSize[i], sizeof(double));
GLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
}
// GW[i]: Gradient of KL w.r.t to Neural Network Weight W[i]
// GB[i]: Gradient of KL w.r.t to Neural Network Bias B[i]
// There is one-to-one correspondence between: GW[i] and W[i], GB[i] and B[i], GStd[i] and Std[i]
double * GW [NumLayers-1];
double * GB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
GW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
GB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// GStd[i]: Gradient of KL w.r.t standard deviation Std[i]
double * GStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Pearlmutter Forward and Backward Propagation ////////////////////
// RyLayer[i]: R{} of each layer's outputs, i.e. R{y_i}
// RxLayer[i]: R{} of each layer's pre-activated outputs, i.e. R{x_i}
// RGLayer[I]: R{} Gradient of KL w.r.t. the pre-activation values in Layer[i], i.e. R{d(KL)/d(x_i)}
double * RyLayer [NumLayers];
double * RxLayer [NumLayers];
double * RGLayer [NumLayers];
for (size_t i=0; i<NumLayers; ++i) {
RyLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
RxLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
RGLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
}
// RGW[i]: R{} Gradient of KL w.r.t. to Neural Network Weight W[i], i.e. R{d(KL)/d(W[i])}
// RGB[i]: R{} Gradient of KL w.r.t. to Neural Network Bias B[i], i.e. R{d(KL)/d(B[i])}
// There is one-to-one correspondence between: RGW[i] and W[i], RGB[i] and B[i]
double * RGW [NumLayers-1];
double * RGB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
RGW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
RGB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// RStd[i]: R{} of Std[i], i.e. R{Std[i]}
// RGStd[i]: R{} Gradient of KL w.r.t. log standard deviation LogStd[i], i.e. R{d(KL)/d(LogStd[i])}
double * RStd = (double *) calloc(ActionSpaceDim, sizeof(double));
double * RGLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Load Neural Network ////////////////////
// Open Model File that contains Weights, Bias and std
FILE *ModelFilePointer = fopen(ModelFile, "r");
if (ModelFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Model File [%s]. \n", ModelFile);
return -1;
}
// Read Weights and Bias from file
for (size_t i=0; i<NumLayers-1; ++i) {
// Reading Weights W[i]: from Layer[i] to Layer[i+1]
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &W[i][j*nextLayerDim+k]);
}
}
// Reading Bias B[i]: from Layer[i] to Layer[i+1]
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &B[i][k]);
}
}
// Read LogStd from file
// Remarks: actually this std will be overwritten by the std from the datafile
for (size_t k=0; k<ActionSpaceDim; ++k) {
fscanf(ModelFilePointer, "%lf", &LogStd[k]);
}
// Close Model File
fclose(ModelFilePointer);
//////////////////// Load Input Vector and Init Result Vector ////////////////////
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
VW[i][j*nextLayerDim+k] = Input[pos];
Result[pos] = 0;
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
VB[i][k] = Input[pos];
Result[pos] = 0;
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
VLogStd[k] = Input[pos];
Result[pos] = 0;
pos++;
}
//////////////////// Load Simulation Data ////////////////////
// Open Data File that contains Mean, std and Observation
FILE *DataFilePointer = fopen(DataFile, "r");
if (DataFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", DataFile);
return -1;
}
// Read Mean, Std and Observation
// Remarks: Std is the same for all samples, and appears in every line in the data file
// so we are reading the same Std again and again to the same place.
for (size_t i=0; i<NumSamples; ++i) {
// Read Mean
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Mean[i*ActionSpaceDim+j]);
}
// Read Std
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Std[j]);
}
// Read Observation
for (size_t j=0; j<ObservSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Observ[i*ObservSpaceDim+j]);
}
}
// Close Data File
fclose(DataFilePointer);
// Compute Average Sample Mean and Average Sample Mean Square
for (size_t i=0; i<NumSamples; ++i) {
for (size_t j=0; j<ActionSpaceDim; ++j) {
AvgSampleMean[j] += Mean[i*ActionSpaceDim+j];
AvgSampleMeanSq[j] += Mean[i*ActionSpaceDim+j] * Mean[i*ActionSpaceDim+j];
}
}
for (size_t j=0; j<ActionSpaceDim; ++j) {
AvgSampleMean[j] = AvgSampleMean[j] / (double)NumSamples;
AvgSampleMeanSq[j] = AvgSampleMeanSq[j] / (double)NumSamples;
}
//////////////////// Main Loop Over All Samples ////////////////////
// Measure Elapsed Time
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for (size_t iter=0; iter<NumSamples; iter++) {
//////////////////// Ordinary Forward Propagation ////////////////////
// Assign Input Values
for (size_t i=0; i<ObservSpaceDim; ++i) Layer[0][i] = Observ[iter*ObservSpaceDim+i];
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
// Propagate from Layer[i] to Layer[i+1]
for (size_t j=0; j<LayerSize[i+1]; ++j) {
// Calculating pre-activated value for item[j] in next layer
Layer[i+1][j] = B[i][j];
for (size_t k=0; k<LayerSize[i]; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
Layer[i+1][j] += Layer[i][k] * W[i][k*LayerSize[i+1]+j];
}
// Apply Activation Function
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function
case 't': {Layer[i+1][j] = tanh(Layer[i+1][j]); break;}
// 0.1x Activation Function
case 'o': {Layer[i+1][j] = 0.1*Layer[i+1][j]; break;}
// sigmoid Activation Function
case 's': {Layer[i+1][j] = 1.0/(1+exp(-Layer[i+1][j])); break;}
// Default: Activation Function not supported
default: {
printf("[ERROR] Activation Function for Layer [%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
return -1;
}
}
}
}
// Check whether the forward propagation output is correct
for (size_t i=0; i<ActionSpaceDim; ++i) {
double output = Layer[NumLayers-1][i];
double expected = Mean[iter*ActionSpaceDim+i];
double err = fabs( (output - expected) / expected ) * 100;
if (err>1) printf("out[%zu] = %e, mean = %e => %.4f%% Difference\n", i, output, expected, err);
}
//////////////////// Ordinary Backward Propagation ////////////////////
// Gradient Initialisation
// Assign the derivative of KL w.r.t. Mean (output values from the final layer) and Std
for (size_t i=0; i<ActionSpaceDim; ++i) {
GLayer[NumLayers-1][i] = 0;
GStd[i] = 0;
}
// Backward Propagation
for (size_t i=NumLayers-1; i>0; --i) {
// Propagate from Layer[i] to Layer[i-1]
for (size_t j=0; j<LayerSize[i]; ++j) {
// Differentiate the activation function
switch (AcFunc[i]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function: tanh' = 1 - tanh^2
case 't': {GLayer[i][j] = GLayer[i][j] * (1- Layer[i][j] * Layer[i][j]); break;}
// 0.1x Activation Function
case 'o': {GLayer[i][j] = 0.1 * GLayer[i][j]; break;}
// sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
case 's': {GLayer[i][j] = GLayer[i][j] * Layer[i][j] * (1- Layer[i][j]); break;}
// Default: Activation Function not supported
default: {
fprintf(stderr, "[ERROR] Activation Function for Layer[%zu] is %c. Unsupported.\n", i, AcFunc[i]);
return -1;
}
}
// The derivative w.r.t to Bias is the same as that w.r.t. the pre-activated value
GB[i-1][j] = GLayer[i][j];
}
// Calculate the derivative w.r.t. to Weight
for (size_t j=0; j<LayerSize[i-1]; ++j) {
for (size_t k=0; k<LayerSize[i]; ++k) {
// The Derivative w.r.t. to the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
GW[i-1][j*LayerSize[i]+k] = GLayer[i][k] * Layer[i-1][j];
}
}
// Calculate the derivative w.r.t. the output values from Layer[i]
for (size_t j=0; j<LayerSize[i-1]; ++j) {
GLayer[i-1][j] = 0;
for (size_t k=0; k<LayerSize[i]; ++k) {
// Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
GLayer[i-1][j] += GLayer[i][k] * W[i-1][j*LayerSize[i]+k];
}
}
}
//////////////////// Pearlmutter Forward Propagation ////////////////////
// Input is constant, so the R{} derivative is 0
for (size_t i=0; i<ObservSpaceDim; ++i) {
RyLayer[0][i] = 0;
RxLayer[0][i] = 0;
}
// Forward Propagation
for (size_t i=0; i<NumLayers-1; ++i) {
// Propagate from Layer[i] to Layer[i+1]
for (size_t j=0; j<LayerSize[i+1]; ++j) {
// Calculate R{x_j} in next layer
RxLayer[i+1][j] = VB[i][j];
for (size_t k=0; k<LayerSize[i]; ++k) {
// From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
RxLayer[i+1][j] += RyLayer[i][k] * W[i][k*LayerSize[i+1]+j];
RxLayer[i+1][j] += Layer[i][k] * VW[i][k*LayerSize[i+1]+j];
}
// Calculate R{y_j} in next layer, need to differentiate Activation Function
switch (AcFunc[i+1]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {RyLayer[i+1][j] = RxLayer[i+1][j]; break;}
// tanh() Activation Function: tanh' = 1 - tanh^2
case 't': {RyLayer[i+1][j] = RxLayer[i+1][j] * (1- Layer[i+1][j] * Layer[i+1][j]); break;}
// 0.1x Activation Function
case 'o': {RyLayer[i+1][j] = 0.1 * RxLayer[i+1][j]; break;}
// sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
case 's': {RyLayer[i+1][j] = RxLayer[i+1][j] * Layer[i+1][j] * (1- Layer[i+1][j]); break;}
// Default: Activation Function not supported
default: {
fprintf(stderr, "[ERROR] Activation Function for Layer[%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
return -1;
}
}
}
}
// Calculating R{Std}
// Remarks: R{Std} is w.r.t. to Std.
for (size_t i=0; i<ActionSpaceDim; ++i) {
RStd[i] = Std[i] * VLogStd[i];
}
//////////////////// Pearlmutter Backward Propagation ////////////////////
// Gradient Initialisation
// Calculating R{} Gradient of KL w.r.t. output values from the final layer, i.e. R{d(KL)/d(mean_i)}
// Calculating R{} Gradient of KL w.r.t. LogStd, i.e. R{d(KL)/d(LogStd[i])}
for (size_t i=0; i<ActionSpaceDim; ++i) {
double StdSq = Std[i] * Std[i];
RGLayer[NumLayers-1][i] = RyLayer[NumLayers-1][i]/StdSq - 2*GLayer[NumLayers-1][i]/Std[i]*RStd[i];
RGLogStd[i] = 2*RStd[i]/Std[i];
}
// Backward Propagation
for (size_t i=NumLayers-1; i>0; --i) {
// Propagate from Layer[i] to Layer[i-1]
for (size_t j=0; j<LayerSize[i]; ++j) {
// Calculating R{} Gradient of KL w.r.t. pre-activated values in Layer[i], i.e. R{d(KL)/d(x_i)}
// Differentiate the activation function
switch (AcFunc[i]) {
// Linear Activation Function: Ac(x) = (x)
case 'l': {break;}
// tanh() Activation Function: tanh' = 1 - tanh^2
case 't': {
RGLayer[i][j] = (1-Layer[i][j]*Layer[i][j])*RGLayer[i][j] - 2*Layer[i][j]*GLayer[i][j]*RxLayer[i][j];
break;
}
// 0.1x Activation Function
case 'o': {RGLayer[i][j] = 0.1 * RGLayer[i][j]; break;}
// sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
case 's': {
RGLayer[i][j] = RGLayer[i][j]*Layer[i][j]*(1-Layer[i][j]) + GLayer[i][j]*(1-2*Layer[i][j])*RxLayer[i][j];
break;
}
// Default: Activation Function not supported
default: {
fprintf(stderr, "[ERROR] Activation Function for Layer [%zu] is %c. Unsupported.\n", i, AcFunc[i]);
return -1;
}
}
// The R{} derivative w.r.t to Bias is the same as that w.r.t. the pre-activated value
RGB[i-1][j] = RGLayer[i][j];
}
// Calculate the R{} derivative w.r.t. to Weight
for (size_t j=0; j<LayerSize[i-1]; ++j) {
for (size_t k=0; k<LayerSize[i]; ++k) {
// The R{} Derivative w.r.t. to the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
RGW[i-1][j*LayerSize[i]+k] = Layer[i-1][j] * RGLayer[i][k] + RyLayer[i-1][j] * GLayer[i][k];
}
}
// Calculate the R{} derivative w.r.t. the output values from Layer[i]
for (size_t j=0; j<LayerSize[i-1]; ++j) {
RGLayer[i-1][j] = 0;
for (size_t k=0; k<LayerSize[i]; ++k) {
// Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
RGLayer[i-1][j] += VW[i-1][j*LayerSize[i]+k] * GLayer[i][k];
RGLayer[i-1][j] += W[i-1][j*LayerSize[i]+k] * RGLayer[i][k];
}
}
}
// Accumulate the Fisher-Vector Product to result
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
Result[pos] += RGW[i][j*nextLayerDim+k];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
Result[pos] += RGB[i][k];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
Result[pos] += RGLogStd[k];
pos++;
}
} // End of iteration over current sample
// Averaging Fisher Vector Product over the samples and apply CG Damping
for (size_t i=0; i<pos; ++i) {
Result[i] = Result[i] / (double)NumSamples;
Result[i] += CG_Damping * Input[i];
}
// Report Computing Time
gettimeofday(&tv2, NULL);
double runtimeComp = ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
printf("[INFO] FVP Computing Time is %f seconds.\n", runtimeComp);
//////////////////// Clean Up ////////////////////
// clean up
for (size_t i=0; i<NumLayers; ++i) {
free(Layer[i]); free(GLayer[i]);
free(RyLayer[i]); free(RxLayer[i]); free(RGLayer[i]);
}
for (size_t i=0; i<NumLayers-1; ++i) {
free(W[i]); free(VW[i]); free(GW[i]); free(RGW[i]);
free(B[i]); free(VB[i]); free(GB[i]); free(RGB[i]);
}
free(LogStd); free(VLogStd); free(RGLogStd);
free(GStd); free(RStd);
free(Observ); free(Mean); free(Std);
free(AvgSampleMean); free(AvgSampleMeanSq);
return runtimeComp;
}
double FVPFast (TRPOparam param, double *Result, double *Input, size_t NumThreads)
{

    //////////////////// Remarks ////////////////////

    // This function computes the Fisher-Vector Product using Pearlmutter Algorithm
    // This version is customised to the case that KL is used as loss function
    // Input: the vector to be multiplied with the Fisher Information Matrix
    // Result: the Fisher-Vector Product
    // Remarks: The length of Input and Result must be the number of all trainable parameters in the network
    // Returns: elapsed wall-clock time of the main compute loop in seconds, or -1 on file-open failure

    // Step1: Combined forward propagation
    // Step2: Pearlmutter backward propagation


    //////////////////// Read Parameters ////////////////////

    // OpenMP Settings
    omp_set_num_threads(NumThreads);

    // Assign Parameters
    const size_t NumLayers = param.NumLayers;
    char * AcFunc = param.AcFunc;
    size_t * LayerSize = param.LayerSize;
    const size_t NumSamples = param.NumSamples;
    char * ModelFile = param.ModelFile;
    char * DataFile = param.DataFile;
    const double CG_Damping = param.CG_Damping;

    // Dimension of Observation Space
    const size_t ObservSpaceDim = LayerSize[0];
    // Dimension of Action Space
    const size_t ActionSpaceDim = LayerSize[NumLayers-1];

    // iterator when traversing through input vector and result vector
    size_t pos;


    //////////////////// Memory Allocation - Neural Network ////////////////////

    // W[i]: Weight Matrix from Layer[i] to Layer[i+1]
    // B[i]: Bias Vector from Layer[i] to Layer[i+1]
    // Item (j,k) in W[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
    // Item B[k] is the bias of Neuron #k in Layer[i+1]
    // NOTE(review): calloc results are unchecked throughout; an OOM failure would
    // dereference NULL. Also, the early "return -1" paths below leak these buffers.
    double * W [NumLayers-1];
    double * B [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        W[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        B[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }


    //////////////////// Memory Allocation - Input Vector ////////////////////

    // The Input Vector is to be multiplied with the Hessian Matrix of KL to derive the Fisher Vector Product
    // There is one-to-one correspondence between the input vector and all trainable parameters in the neural network
    // As a result, the shape of the Input Vector is the same as that of the parameters in the model
    // The only difference is that the Input Vector is stored in a flattened manner
    // There is one-to-one correspondence between: VW[i] and W[i], VB[i] and B[i], VStd[i] and Std[i]
    double * VW [NumLayers-1];
    double * VB [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        VW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        VB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }
    // Allocate Memory for Input Vector corresponding to LogStd
    double * VLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));


    //////////////////// Memory Allocation - Simulation Data ////////////////////

    // Allocate Memory for Observation and Probability Mean
    // Observ: list of observations - corresponds to ob_no in modular_rl
    // Mean: list of probablity mean values - corresponds to the 'mean' part of prob_np in modular_rl
    // Remarks: due to the specific setting of the experienments in the TRPO paper,
    //          Std is the same for all samples in each simulation iteration,
    //          so we just allocate Std memory space for one sample and use it for all samples.
    //          The general case should be another vector of Std with size NumSamples*ActionSpaceDim
    double * Observ = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
    double * Mean = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
    double * Std = (double *) calloc(ActionSpaceDim, sizeof(double));


    //////////////////// Memory Allocation - Ordinary Forward Propagation ////////////////////

    // Layer[i] : Memory of each layer's outputs, i.e. y_i
    double * Layer [NumLayers];
    for (size_t i=0; i<NumLayers; ++i) {
        Layer[i] = (double *) calloc(LayerSize[i], sizeof(double));
    }


    //////////////////// Memory Allocation - Pearlmutter Forward and Backward Propagation ////////////////////

    // RyLayer[i]: R{} of each layer's outputs, i.e. R{y_i}
    // RxLayer[i]: R{} of each layer's pre-activated outputs, i.e. R{x_i}
    // RGLayer[I]: R{} Gradient of KL w.r.t. the pre-activation values in Layer[i], i.e. R{d(KL)/d(x_i)}
    double * RyLayer [NumLayers];
    double * RxLayer [NumLayers];
    double * RGLayer [NumLayers];
    for (size_t i=0; i<NumLayers; ++i) {
        RyLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
        RxLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
        RGLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
    }

    // RGW[i]: R{} Gradient of KL w.r.t. to Neural Network Weight W[i], i.e. R{d(KL)/d(W[i])}
    // RGB[i]: R{} Gradient of KL w.r.t. to Neural Network Bias B[i], i.e. R{d(KL)/d(B[i])}
    // There is one-to-one correspondence between: RGW[i] and W[i], RGB[i] and B[i]
    double * RGW [NumLayers-1];
    double * RGB [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        RGW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        RGB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }


    //////////////////// Load Neural Network ////////////////////

    // Open Model File that contains Weights, Bias and std
    FILE *ModelFilePointer = fopen(ModelFile, "r");
    if (ModelFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Model File [%s]. \n", ModelFile);
        return -1;
    }

    // Read Weights and Bias from file
    // NOTE(review): fscanf return values are unchecked here and below; a short or
    // malformed file silently leaves the remaining entries at their calloc'd zeros.
    for (size_t i=0; i<NumLayers-1; ++i) {
        // Reading Weights W[i]: from Layer[i] to Layer[i+1]
        size_t curLayerDim = LayerSize[i];
        size_t nextLayerDim = LayerSize[i+1];
        for (size_t j=0; j<curLayerDim;++j) {
            for (size_t k=0; k<nextLayerDim; ++k) {
                fscanf(ModelFilePointer, "%lf", &W[i][j*nextLayerDim+k]);
            }
        }
        // Reading Bias B[i]: from Layer[i] to Layer[i+1]
        for (size_t k=0; k<nextLayerDim; ++k) {
            fscanf(ModelFilePointer, "%lf", &B[i][k]);
        }
    }

    // Read LogStd from file into Std
    // Remarks: actually this LogStd will be overwritten by the Std from the datafile
    for (size_t k=0; k<ActionSpaceDim; ++k) {
        fscanf(ModelFilePointer, "%lf", &Std[k]);
    }

    // Close Model File
    fclose(ModelFilePointer);


    //////////////////// Load Input Vector and Init Result Vector ////////////////////

    // Unflatten Input into VW / VB / VLogStd, zeroing Result as we go.
    // The flattened layout is: for each layer pair (weights row-major, then bias), then LogStd.
    pos = 0;
    for (size_t i=0; i<NumLayers-1; ++i) {
        size_t curLayerDim = LayerSize[i];
        size_t nextLayerDim = LayerSize[i+1];
        for (size_t j=0; j<curLayerDim;++j) {
            for (size_t k=0; k<nextLayerDim; ++k) {
                VW[i][j*nextLayerDim+k] = Input[pos];
                Result[pos] = 0;
                pos++;
            }
        }
        for (size_t k=0; k<nextLayerDim; ++k) {
            VB[i][k] = Input[pos];
            Result[pos] = 0;
            pos++;
        }
    }
    for (size_t k=0; k<ActionSpaceDim; ++k) {
        VLogStd[k] = Input[pos];
        Result[pos] = 0;
        pos++;
    }


    //////////////////// Load Simulation Data ////////////////////

    // Open Data File that contains Mean, std and Observation
    FILE *DataFilePointer = fopen(DataFile, "r");
    if (DataFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", DataFile);
        return -1;
    }

    // Read Mean, Std and Observation
    // Remarks: Std is the same for all samples, and appears in every line in the data file
    //          so we are writing the same Std again and again to the same place.
    for (size_t i=0; i<NumSamples; ++i) {
        // Read Mean
        for (size_t j=0; j<ActionSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Mean[i*ActionSpaceDim+j]);
        }
        // Read Std
        for (size_t j=0; j<ActionSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Std[j]);
        }
        // Read Observation
        for (size_t j=0; j<ObservSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Observ[i*ObservSpaceDim+j]);
        }
    }

    // Close Data File
    fclose(DataFilePointer);


    //////////////////// Main Loop Over All Samples ////////////////////

    // Measure Elapsed Time
    struct timeval tv1, tv2;
    gettimeofday(&tv1, NULL);

    for (size_t iter=0; iter<NumSamples; iter++) {

        //////////////////// Combined Forward Propagation ////////////////////

        // Initialise the Input Layer
        // The input observation is a constant, so its R{} derivative is 0
        for (size_t i=0; i<ObservSpaceDim; ++i) {
            Layer[0][i] = Observ[iter*ObservSpaceDim+i];
            RxLayer[0][i] = 0;
            RyLayer[0][i] = 0;
        }

        // Forward Propagation: ordinary values and R{} values in one pass
        for (size_t i=0; i<NumLayers-1; ++i) {

            size_t CurrLayerSize = LayerSize[i];
            size_t NextLayerSize = LayerSize[i+1];
            size_t j, k;

            // Propagate from Layer[i] to Layer[i+1]
            // Each output neuron j is independent, so the j-loop parallelises cleanly
            #pragma omp parallel for private(j,k) shared(Layer, RxLayer, RyLayer, W, VW, B, VB, AcFunc) schedule(static)
            for (j=0; j<NextLayerSize; ++j) {

                // Initialise x_j and R{x_j} in next layer
                // Here we just use y_j's memory space to store x_j temporarily
                Layer[i+1][j] = B[i][j];
                RxLayer[i+1][j] = VB[i][j];

                for (k=0; k<CurrLayerSize; ++k) {
                    // From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
                    Layer[i+1][j] += Layer[i][k] * W[i][k*NextLayerSize+j];
                    RxLayer[i+1][j] += RyLayer[i][k] * W[i][k*NextLayerSize+j];
                    RxLayer[i+1][j] += Layer[i][k] * VW[i][k*NextLayerSize+j];
                }

                // Calculate y_j and R{y_j} in next layer. Note that R{y_j} depends on y_j
                switch (AcFunc[i+1]) {
                    // Linear Activation Function: Ac(x) = (x)
                    case 'l': {
                        RyLayer[i+1][j] = RxLayer[i+1][j];
                        break;
                    }
                    // tanh() Activation Function
                    case 't': {
                        Layer[i+1][j] = tanh(Layer[i+1][j]);
                        RyLayer[i+1][j] = RxLayer[i+1][j] * (1 - Layer[i+1][j] * Layer[i+1][j]);
                        break;
                    }
                    // 0.1x Activation Function
                    case 'o': {
                        Layer[i+1][j] = 0.1 * Layer[i+1][j];
                        RyLayer[i+1][j] = 0.1 * RxLayer[i+1][j];
                        break;
                    }
                    // sigmoid Activation Function
                    case 's': {
                        Layer[i+1][j] = 1.0 / ( 1 + exp(-Layer[i+1][j]) );
                        RyLayer[i+1][j] = RxLayer[i+1][j] * Layer[i+1][j] * (1 - Layer[i+1][j]);
                        break;
                    }
                    // Default: Activation Function not supported
                    // NOTE(review): unlike the sequential version, no "return -1" here -
                    // branching out of an OpenMP parallel region is not allowed, so the
                    // error is only reported and the computation carries on.
                    default: {
                        printf("[ERROR] AC Function for Layer[%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
                    }
                }
            }
        }

        // Check whether the forward propagation output is correct
        // (relative error of the network output against the Mean recorded in the data file)
        for (size_t i=0; i<ActionSpaceDim; ++i) {
            double output = Layer[NumLayers-1][i];
            double expected = Mean[iter*ActionSpaceDim+i];
            double err = fabs( (output - expected) / expected ) * 100;
            if (err>1) printf("out[%zu] = %e, mean = %e => %.4f%% Difference\n", i, output, expected, err);
        }


        //////////////////// Pearlmutter Backward Propagation ////////////////////

        // Gradient Initialisation
        // Calculating R{} Gradient of KL w.r.t. output values from the final layer, i.e. R{d(KL)/d(mean_i)}
        for (size_t i=0; i<ActionSpaceDim; ++i) {
            RGLayer[NumLayers-1][i] = RyLayer[NumLayers-1][i] / Std[i] / Std[i];
        }

        // Backward Propagation
        for (size_t i=NumLayers-1; i>0; --i) {

            size_t CurrLayerSize = LayerSize[i];
            size_t PrevLayerSize = LayerSize[i-1];
            size_t j, k;

            // Propagate from Layer[i] to Layer[i-1]
            #pragma omp parallel for private(j) shared(Layer, RGLayer, RGB) schedule(static)
            for (j=0; j<CurrLayerSize; ++j) {

                // Calculating R{} Gradient of KL w.r.t. pre-activated values in Layer[i], i.e. R{d(KL)/d(x_i)}
                // Differentiate the activation function
                switch (AcFunc[i]) {
                    // Linear Activation Function: Ac(x) = (x)
                    case 'l': {break;}
                    // tanh() Activation Function: tanh' = 1 - tanh^2
                    case 't': {RGLayer[i][j] = (1-Layer[i][j]*Layer[i][j])*RGLayer[i][j]; break;}
                    // 0.1x Activation Function
                    case 'o': {RGLayer[i][j] = 0.1 * RGLayer[i][j]; break;}
                    // sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
                    case 's': {RGLayer[i][j] = RGLayer[i][j]*Layer[i][j]*(1-Layer[i][j]); break;}
                    // Default: Activation Function not supported
                    // NOTE(review): error reported only; cannot return from inside a parallel region
                    default: {
                        fprintf(stderr, "[ERROR] AC Function for Layer [%zu] is %c. Unsupported.\n", i, AcFunc[i]);
                    }
                }

                // The R{} derivative w.r.t to Bias is the same as that w.r.t. the pre-activated value
                RGB[i-1][j] = RGLayer[i][j];
            }

            // Calculate the R{} derivative w.r.t. to Weight and the output values from Layer[i]
            #pragma omp parallel for private(j,k) shared(Layer, RGLayer, W, RGW) schedule(static)
            for (j=0; j<PrevLayerSize; ++j) {
                double temp = 0;
                for (k=0; k<CurrLayerSize; ++k) {
                    // The R{} Derivative w.r.t. to the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
                    RGW[i-1][j*CurrLayerSize+k] = Layer[i-1][j] * RGLayer[i][k];
                    // Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
                    temp += W[i-1][j*CurrLayerSize+k] * RGLayer[i][k];
                }
                RGLayer[i-1][j] = temp;
            }
        }

        // Accumulate the Fisher-Vector Product to result
        // (same flattened layout as the Input vector: weights, bias per layer pair, then LogStd)
        pos = 0;
        for (size_t i=0; i<NumLayers-1; ++i) {
            size_t curLayerDim = LayerSize[i];
            size_t nextLayerDim = LayerSize[i+1];
            for (size_t j=0; j<curLayerDim;++j) {
                for (size_t k=0; k<nextLayerDim; ++k) {
                    Result[pos] += RGW[i][j*nextLayerDim+k];
                    pos++;
                }
            }
            for (size_t k=0; k<nextLayerDim; ++k) {
                Result[pos] += RGB[i][k];
                pos++;
            }
        }
        for (size_t k=0; k<ActionSpaceDim; ++k) {
            Result[pos] += 2 * VLogStd[k];
            pos++;
        }

    } // End of iteration over current sample


    // Averaging Fisher Vector Product over the samples and apply CG Damping
    #pragma omp parallel for
    for (size_t i=0; i<pos; ++i) {
        Result[i] = Result[i] / (double)NumSamples + CG_Damping * Input[i];
    }

    gettimeofday(&tv2, NULL);
    double runtimeS = ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;


    //////////////////// Clean Up ////////////////////

    // clean up
    for (size_t i=0; i<NumLayers; ++i) {
        free(Layer[i]); free(RxLayer[i]); free(RyLayer[i]); free(RGLayer[i]);
    }
    for (size_t i=0; i<NumLayers-1; ++i) {
        free(W[i]); free(VW[i]); free(RGW[i]);
        free(B[i]); free(VB[i]); free(RGB[i]);
    }
    free(Observ); free(Mean); free(Std); free(VLogStd);

    return runtimeS;
}
double CG(TRPOparam param, double *Result, double *b, size_t MaxIter, double ResidualTh, size_t NumThreads)
{
    //////////////////// Conjugate Gradient ////////////////////

    // This function implements Conjugate Gradient algorithm to solve linear equation Ax=b,
    // where A is the Fisher Information Matrix, applied implicitly through FVPFast.
    // Result: The Conjugate Gradient Result, i.e. solution x to Ax=b
    // b: Vector b in the equation Ax=b
    // MaxIter: Maximum Iterations of Conjugate Gradient (in modular_rl is 10)
    // ResidualTh: Threshold of Residual (in modular_rl is 1e-10)
    // NumThreads: number of OpenMP threads
    // Returns: accumulated computation time in seconds, or -1 on failure

    // OpenMP Settings
    omp_set_num_threads(NumThreads);

    // Memory Allocation
    size_t NumParams = NumParamsCalc(param.LayerSize, param.NumLayers);
    double * p = (double *) calloc(NumParams, sizeof(double));   // search direction
    double * r = (double *) calloc(NumParams, sizeof(double));   // residual r = b - Ax
    double * x = (double *) calloc(NumParams, sizeof(double));   // current solution estimate
    double * z = (double *) calloc(NumParams, sizeof(double));   // z = A*p (Fisher-Vector Product)

    // Fix: check allocations before use (CERT MEM32-C); free(NULL) is a harmless no-op
    if (p==NULL || r==NULL || x==NULL || z==NULL) {
        fprintf(stderr, "[ERROR] Conjugate Gradient: Memory allocation failed.\n");
        free(p); free(r); free(x); free(z);
        return -1;
    }

    // Initialisation: x starts at 0 (calloc), so the initial residual r and direction p both equal b
    double rdotr = 0;
    for (size_t i=0; i<NumParams; ++i) {
        p[i] = b[i];
        r[i] = b[i];
        rdotr += r[i] * r[i];
    }

    // Iterative Solver
    // Measure Elapsed Time
    struct timeval tv1, tv2;
    double ComptimeS = 0;

    for (size_t iter=0; iter<=MaxIter; ++iter) {

        // Calculate Frobenius Norm of x (for progress reporting only)
        double FrobNorm = 0;
        gettimeofday(&tv1, NULL);

        #pragma omp parallel for reduction (+:FrobNorm)
        for (size_t i=0; i<NumParams; ++i) {
            FrobNorm += x[i] * x[i];
        }
        FrobNorm = sqrt(FrobNorm);
        gettimeofday(&tv2, NULL);

        printf("CG Iter[%zu] Residual Norm=%.12e, Soln Norm=%.12e\n", iter, rdotr, FrobNorm);

        // Check Termination Condition: converged, or iteration budget exhausted
        if (rdotr<ResidualTh || iter==MaxIter) {
            for (size_t i=0; i<NumParams; ++i) Result[i] = x[i];
            break;
        }

        // Calculate z = FIM*p
        double FVPTime = FVPFast(param, z, p, NumThreads);
        if (FVPTime<0) {
            fprintf(stderr, "[ERROR] Fisher Vector Product Calculation Failed.\n");
            free(p); free(r); free(x); free(z);
            return -1;
        }
        else {
            // Count both the norm computation above and the FVP itself
            ComptimeS += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
            ComptimeS += FVPTime;
        }

        // Update x and r: step size v = (r.r) / (p.Ap)
        double pdotz = 0;
        gettimeofday(&tv1, NULL);
        #pragma omp parallel for reduction (+:pdotz)
        for (size_t i=0; i<NumParams; ++i) {
            pdotz += p[i] * z[i];
        }
        double v = rdotr / pdotz;
        #pragma omp parallel for
        for (size_t i=0; i<NumParams; ++i) {
            x[i] += v * p[i];
            r[i] -= v * z[i];
        }

        // Update p: new direction is r plus mu times the old direction (Fletcher-Reeves)
        double newrdotr = 0;
        #pragma omp parallel for reduction (+:newrdotr)
        for (size_t i=0; i<NumParams; ++i) {
            newrdotr += r[i] * r[i];
        }
        double mu = newrdotr / rdotr;
        #pragma omp parallel for
        for (size_t i=0; i<NumParams; ++i) {
            p[i] = r[i] + mu * p[i];
        }

        // Update rdotr
        rdotr = newrdotr;

        gettimeofday(&tv2, NULL);
        ComptimeS += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
    }

    // Clean Up
    free(p); free(r); free(x); free(z);

    return ComptimeS;
}
double FVP_FPGA (TRPOparam param, double *Result, double *Input)
{
//////////////////// Remarks ////////////////////
// This function computes the Fisher-Vector Product using Pearlmutter Algorithm
// This version is customised to the case that KL is used as loss function
// Input: the vector to be multiplied with the Fisher Information Matrix
// Result: the Fisher-Vector Product
// Remarks: The length of Input and Result must be the number of all trainable parameters in the network
// Step1: Combined forward propagation
// Step3: Pearlmutter backward propagation
//////////////////// Read Parameters ////////////////////
// Assign Parameters - For CPU and FPGA
const size_t NumLayers = param.NumLayers;
size_t * LayerSize = param.LayerSize;
const size_t NumSamples = param.NumSamples;
char * ModelFile = param.ModelFile;
char * DataFile = param.DataFile;
const double CG_Damping = param.CG_Damping;
// Assign Parameters - For FPGA Only
size_t * PaddedLayerSize = param.PaddedLayerSize;
size_t * NumBlocks = param.NumBlocks;
// Dimension of Observation Space
const size_t ObservSpaceDim = LayerSize[0];
// Dimension of Action Space
const size_t ActionSpaceDim = LayerSize[NumLayers-1];
// iterator when traversing through input vector and result vector
size_t pos;
//////////////////// Memory Allocation - Neural Network ////////////////////
// W[i]: Weight Matrix from Layer[i] to Layer[i+1]
// B[i]: Bias Vector from Layer[i] to Layer[i+1]
// Item (j,k) in W[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
// Item B[k] is the bias of Neuron #k in Layer[i+1]
double * W [NumLayers-1];
double * B [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
W[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
B[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
//////////////////// Memory Allocation - Input Vector ////////////////////
// The Input Vector is to be multiplied with the Hessian Matrix of KL to derive the Fisher Vector Product
// There is one-to-one correspondence between the input vector and all trainable parameters in the neural network
// As a result, the shape of the Input Vector is the same as that of the parameters in the model
// The only difference is that the Input Vector is stored in a flattened manner
// There is one-to-one correspondence between: VW[i] and W[i], VB[i] and B[i], VStd[i] and Std[i]
double * VW [NumLayers-1];
double * VB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
VW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
VB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// Allocate Memory for Input Vector corresponding to LogStd
double * VLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Simulation Data ////////////////////
// Allocate Memory for Observation and Probability Mean
// Observ: list of observations - corresponds to ob_no in modular_rl
// Mean: list of probablity mean values - corresponds to the 'mean' part of prob_np in modular_rl
// Remarks: due to the specific setting of the experienments in the TRPO paper,
// Std is the same for all samples in each simulation iteration,
// so we just allocate Std memory space for one sample and use it for all samples.
// The general case should be another vector of Std with size NumSamples*ActionSpaceDim
double * Observ = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
double * Mean = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Std = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Load Neural Network ////////////////////
// Open Model File that contains Weights, Bias and std
FILE *ModelFilePointer = fopen(ModelFile, "r");
if (ModelFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Model File [%s]. \n", ModelFile);
return -1;
}
// Read Weights and Bias from file
for (size_t i=0; i<NumLayers-1; ++i) {
// Reading Weights W[i]: from Layer[i] to Layer[i+1]
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &W[i][j*nextLayerDim+k]);
}
}
// Reading Bias B[i]: from Layer[i] to Layer[i+1]
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &B[i][k]);
}
}
// Read LogStd from file
// Remarks: actually this LogStd will be overwritten by the Std from the datafile
for (size_t k=0; k<ActionSpaceDim; ++k) {
fscanf(ModelFilePointer, "%lf", &Std[k]);
}
// Close Model File
fclose(ModelFilePointer);
//////////////////// Load Input Vector and Init Result Vector ////////////////////
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
VW[i][j*nextLayerDim+k] = Input[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
VB[i][k] = Input[pos];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
VLogStd[k] = Input[pos];
pos++;
}
//////////////////// Load Simulation Data ////////////////////
// Open Data File that contains Mean, std and Observation
FILE *DataFilePointer = fopen(DataFile, "r");
if (DataFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", DataFile);
return -1;
}
// Read Mean, Std and Observation
// Remarks: Std is the same for all samples, and appears in every line in the data file
// so we are writing the same Std again and again to the same place.
for (size_t i=0; i<NumSamples; ++i) {
// Read Mean
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Mean[i*ActionSpaceDim+j]);
}
// Read Std
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Std[j]);
}
// Read Observation
for (size_t j=0; j<ObservSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Observ[i*ObservSpaceDim+j]);
}
}
// Close Data File
fclose(DataFilePointer);
//////////////////// FPGA - Initialisation ////////////////////
// Load Maxfile and Engine
fprintf(stderr, "[INFO] Initialising FPGA...\n");
max_file_t* maxfile = TRPO_init();
max_engine_t* engine = max_load(maxfile, "*");
fprintf(stderr, "[INFO] Loading Model and Simulation Data...\n");
// Calculate BlockDim
size_t * BlockDim = (size_t *) calloc(NumLayers, sizeof(size_t));
for (int i=0; i<NumLayers; ++i) BlockDim[i] = PaddedLayerSize[i] / NumBlocks[i];
// Length of Weight and VWeight Initialisation Vector
int WeightInitVecLength = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
WeightInitVecLength += 2 * BlockDim[i] * PaddedLayerSize[i+1];
}
// Length of Observation Vector
// Remarks: DRAM Write requires data bit-size to be a multiple of 384bytes
// Namely, the number of items must be a multiple of 48
size_t ObservVecLength = WeightInitVecLength + NumSamples*BlockDim[0];
size_t ObservVecWidth = NumBlocks[0];
size_t ActualObservVecItems = ObservVecLength * ObservVecWidth;
size_t PaddedObservVecItems = (size_t) 48 * ceil( (double)ActualObservVecItems/48 );
fprintf(stderr, "[INFO] Observation Vector (%zu bytes) padded to %zu bytes\n", ActualObservVecItems*8, PaddedObservVecItems*8);
double * Observation = (double *) calloc(PaddedObservVecItems, sizeof(double));
// Feed Weight and VWeight into Observation
size_t RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
// Parameters of current
size_t InBlockDim = BlockDim[ID];
size_t NumInBlocks = NumBlocks[ID];
size_t OutBlockDim = BlockDim[ID+1];
size_t NumOutBlocks = NumBlocks[ID+1];
size_t OutLayerSize = LayerSize[ID+1];
// Feed Weight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
Observation[RowNum*ObservVecWidth+X] = W[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
// Feed VWeight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (size_t X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
Observation[RowNum*ObservVecWidth+X] = VW[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
}
// Feed actual observation data into Observation
for (size_t iter=0; iter<NumSamples; ++iter) {
size_t InBlockDim = BlockDim[0];
size_t NumInBlocks = NumBlocks[0];
for (int addrX=0; addrX<InBlockDim; ++addrX) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[0];
if (RowNumPadded<RowNumLimit) Observation[RowNum*ObservVecWidth+X] = Observ[iter*ObservSpaceDim+RowNumPadded];
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
// Length of BiasStd Vector
size_t BiasStdVecLength = PaddedLayerSize[NumLayers-1];
for (size_t i=1; i<NumLayers; ++i) {
BiasStdVecLength += 2*PaddedLayerSize[i];
}
double * BiasStd = (double *) calloc(BiasStdVecLength, sizeof(double));
// Feed Bias and VBias into BiasStd
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
size_t nextLayerDim = PaddedLayerSize[ID+1];
size_t nextLayerDimLimit = LayerSize[ID+1];
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = B[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = VB[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
}
// Feed (1/Std)^2 into BiasStd
for (size_t k=0; k<PaddedLayerSize[NumLayers-1]; ++k) {
size_t LayerDimLimit = LayerSize[NumLayers-1];
if (k<LayerDimLimit) BiasStd[RowNum] = 1.0 / Std[k] / Std[k];
else BiasStd[RowNum] = 0;
RowNum++;
}
//////////////////// FPGA - Init ////////////////////
TRPO_WriteDRAM_actions_t write_action;
write_action.param_start_bytes = 0;
write_action.param_size_bytes = PaddedObservVecItems * sizeof(double);
write_action.instream_fromCPU = Observation;
TRPO_WriteDRAM_run(engine, &write_action);
fprintf(stderr, "[INFO] Loading Model and Simulation Data...Done\n");
//////////////////// FPGA - Run ////////////////////
// Here we assume 4 layers
// Number of Cycles to Run - Forward Propagation and Back Propagation
size_t MaxBlkDim0Dim2 = (BlockDim[0]>BlockDim[2]) ? BlockDim[0] : BlockDim[2];
size_t FwdCyclesPerSample = BlockDim[0] + (BlockDim[1]-1)*MaxBlkDim0Dim2 + BlockDim[2]*BlockDim[3];
size_t BwdCyclesPerSample = BlockDim[1]*MaxBlkDim0Dim2 + BlockDim[2]*BlockDim[3];
size_t CyclesPerSample = (FwdCyclesPerSample>BwdCyclesPerSample) ? FwdCyclesPerSample : BwdCyclesPerSample;
size_t PropCyclesTotal = CyclesPerSample * (NumSamples + 1);
// Number of Cycles to Run - Read Result
size_t FVPLength = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
FVPLength += PaddedLayerSize[i] * PaddedLayerSize[i+1];
FVPLength += PaddedLayerSize[i+1];
}
int PaddedFVPLength = ((int)ceil((double)FVPLength/2))*2;
// Number of Cycles to Run - Total
size_t NumTicks = WeightInitVecLength + PropCyclesTotal + PaddedFVPLength + 20;
// Allocation Memory Space for FVP Result
double * FVPResult = (double *) calloc(PaddedFVPLength, sizeof(double));
// Init Advanced Static Interface
TRPO_Run_actions_t run_action;
run_action.param_NumSamples = NumSamples;
run_action.param_PaddedObservVecItems = PaddedObservVecItems;
run_action.instream_BiasStd = BiasStd;
run_action.outstream_FVP = FVPResult;
// Run DFE and Measure Elapsed Time
fprintf(stderr, "[INFO] Running on FPGA for %zu cycles...\n", NumTicks);
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
TRPO_Run_run(engine, &run_action);
gettimeofday(&tv2, NULL);
double runtimeS = ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
fprintf(stderr, "[INFO] Running on FPGA...Done\n");
fprintf(stderr, "[INFO] Elasped Time (FPGA) is %f seconds.\n", runtimeS);
// Free Engine and Maxfile
max_unload(engine);
TRPO_free();
// Read FVP into Result
pos = 0;
size_t FVPPos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerSizePadded = PaddedLayerSize[i];
size_t nextLayerSizePadded = PaddedLayerSize[i+1];
size_t curLayerSizeReal = LayerSize[i];
size_t nextLayerSizeReal = LayerSize[i+1];
for (size_t j=0; j<curLayerSizePadded; ++j) {
for (size_t k=0; k<nextLayerSizePadded; ++k) {
if ( (j<curLayerSizeReal) && (k<nextLayerSizeReal) ) {
Result[pos] = FVPResult[FVPPos];
pos++;
}
FVPPos++;
}
}
for (size_t k=0; k<nextLayerSizePadded; ++k) {
if (k<nextLayerSizeReal) {
Result[pos] = FVPResult[FVPPos];
pos++;
}
FVPPos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
Result[pos] = 2 * NumSamples * VLogStd[k];
pos++;
}
// Averaging Fisher Vector Product over the samples and apply CG Damping
for (size_t i=0; i<pos; ++i) {
Result[i] = Result[i] / (double)NumSamples;
Result[i] += CG_Damping * Input[i];
}
//////////////////// Clean Up ////////////////////
fprintf(stderr, "[INFO] Clean up...\n");
// Free Memories Allocated for Reading Files
for (size_t i=0; i<NumLayers-1; ++i) {
free(W[i]); free(VW[i]);
free(B[i]); free(VB[i]);
}
free(Observ); free(Mean); free(Std); free(VLogStd);
// Free Memories Allocated for DFE
free(Observation); free(BiasStd); free(FVPResult);
return runtimeS;
}
double CG_FPGA (TRPOparam param, double *Result, double *b, size_t MaxIter, double ResidualTh, size_t NumThreads)
{
//////////////////// Conjugate Gradient ////////////////////
// This function implements Conjugate Gradient algorithm to solve linear equation Ax=b,
// where A is the (damped, sample-averaged) Fisher Information Matrix of the policy and
// each matrix-vector product A*p is computed on the FPGA (Maxeler DFE) per iteration.
// Result: The Conjugate Gradient Result, i.e. solution x to Ax=b
// b: Vector b in the equation Ax=b
// MaxIter: Maximum Iterations of Conjugate Gradient (in modular_rl is 10)
// ResidualTh: Threshold of Residual (in modular_rl is 1e-10)
// NumThreads: Number of Threads to use
// Returns: pure computing time in seconds on success, or -1 if a file cannot be opened.
// NOTE(review): the early "return -1" paths below leak all prior calloc'd buffers;
// acceptable only if the caller treats -1 as fatal for the process - confirm.
//////////////////// Parameters ////////////////////
// OpenMP Settings
omp_set_num_threads(NumThreads);
// Assign Parameters - For CPU and FPGA
const size_t NumLayers = param.NumLayers;
size_t * LayerSize = param.LayerSize;
const size_t NumSamples = param.NumSamples;
char * ModelFile = param.ModelFile;
char * DataFile = param.DataFile;
const double CG_Damping = param.CG_Damping;
const size_t NumParams = NumParamsCalc(LayerSize, NumLayers);
// Assign Parameters - For FPGA Only
size_t * PaddedLayerSize = param.PaddedLayerSize;
size_t * NumBlocks = param.NumBlocks;
// Dimension of Observation Space and Action Space
const size_t ObservSpaceDim = LayerSize[0];
const size_t ActionSpaceDim = LayerSize[NumLayers-1];
// Calculate BlockDim
// NOTE(review): calloc results in this function are not checked before use.
size_t * BlockDim = (size_t *) calloc(NumLayers, sizeof(size_t));
for (int i=0; i<NumLayers; ++i) BlockDim[i] = PaddedLayerSize[i] / NumBlocks[i];
// Length of Weight and VWeight Initialisation Vector
int WeightInitVecLength = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
WeightInitVecLength += 2 * BlockDim[i] * PaddedLayerSize[i+1];
}
// Number of Cycles to Run on FPGA - Pipelined Forward and Back Propagation
// Remarks: Here we assume 4 layers
size_t MaxBlkDim0Dim2 = (BlockDim[0]>BlockDim[2]) ? BlockDim[0] : BlockDim[2];
size_t FwdCyclesPerSample = BlockDim[0] + (BlockDim[1]-1)*MaxBlkDim0Dim2 + BlockDim[2]*BlockDim[3];
size_t BwdCyclesPerSample = BlockDim[1]*MaxBlkDim0Dim2 + BlockDim[2]*BlockDim[3];
size_t CyclesPerSample = (FwdCyclesPerSample>BwdCyclesPerSample) ? FwdCyclesPerSample : BwdCyclesPerSample;
size_t PropCyclesTotal = CyclesPerSample * (NumSamples + 1);
// Number of Cycles to Run on FPGA - Read Result Back
size_t FVPLength = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
FVPLength += PaddedLayerSize[i] * PaddedLayerSize[i+1];
FVPLength += PaddedLayerSize[i+1];
}
// Round FVP length up to the next even number (DFE stream width requirement, presumably)
int PaddedFVPLength = ((int)ceil((double)FVPLength/2))*2;
// Number of Cycles to Run on FPGA for Each FVP Computation - Total
// NOTE(review): NumTicks is only printed for information below; the actual run length
// is governed by the run-action parameters, not by this value - confirm against maxfile.
size_t NumTicks = WeightInitVecLength + PropCyclesTotal + PaddedFVPLength + 20;
// Allocation Memory Space for FVP Result
double * FVPResult = (double *) calloc(PaddedFVPLength, sizeof(double));
// iterator when traversing through input vector and result vector
size_t pos;
//////////////////// Memory Allocation - Conjugate Gradient Vectors ////////////////////
// p: search direction, r: residual, x: current solution, z: A*p (the FVP result)
double * p = (double *) calloc(NumParams, sizeof(double));
double * r = (double *) calloc(NumParams, sizeof(double));
double * x = (double *) calloc(NumParams, sizeof(double));
double * z = (double *) calloc(NumParams, sizeof(double));
//////////////////// Memory Allocation - Neural Network ////////////////////
// W[i]: Weight Matrix from Layer[i] to Layer[i+1]
// B[i]: Bias Vector from Layer[i] to Layer[i+1]
// Item (j,k) in W[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
// Item B[k] is the bias of Neuron #k in Layer[i+1]
double * W [NumLayers-1];
double * B [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
W[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
B[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
//////////////////// Memory Allocation - Input Vector ////////////////////
// The Input Vector is to be multiplied with the Hessian Matrix of KL to derive the Fisher Vector Product
// There is one-to-one correspondence between the input vector and all trainable parameters in the neural network
// As a result, the shape of the Input Vector is the same as that of the parameters in the model
// The only difference is that the Input Vector is stored in a flattened manner
// There is one-to-one correspondence between: VW[i] and W[i], VB[i] and B[i], VStd[i] and Std[i]
double * VW [NumLayers-1];
double * VB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
VW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
VB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// Allocate Memory for Input Vector corresponding to LogStd
double * VLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Simulation Data ////////////////////
// Allocate Memory for Observation and Probability Mean
// Observ: list of observations - corresponds to ob_no in modular_rl
// Mean: list of probability mean values - corresponds to the 'mean' part of prob_np in modular_rl
// Remarks: due to the specific setting of the experiments in the TRPO paper,
// Std is the same for all samples in each simulation iteration,
// so we just allocate Std memory space for one sample and use it for all samples.
// The general case should be another vector of Std with size NumSamples*ActionSpaceDim
double * Observ = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
double * Mean = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Std = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Load Neural Network ////////////////////
// Open Model File that contains Weights, Bias and std
FILE *ModelFilePointer = fopen(ModelFile, "r");
if (ModelFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Model File [%s]. \n", ModelFile);
return -1;
}
// Read Weights and Bias from file
// NOTE(review): fscanf return values are not checked here (or for the data file below);
// a truncated/malformed file silently leaves trailing entries zero from calloc.
for (size_t i=0; i<NumLayers-1; ++i) {
// Reading Weights W[i]: from Layer[i] to Layer[i+1]
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &W[i][j*nextLayerDim+k]);
}
}
// Reading Bias B[i]: from Layer[i] to Layer[i+1]
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &B[i][k]);
}
}
// Read LogStd from file
// Remarks: actually this LogStd will be overwritten by the Std from the datafile
for (size_t k=0; k<ActionSpaceDim; ++k) {
fscanf(ModelFilePointer, "%lf", &Std[k]);
}
// Close Model File
fclose(ModelFilePointer);
//////////////////// Load Vector b and Init Result Vector ////////////////////
// Initialisation - CG
// Standard CG start from x=0: direction p and residual r both start as b.
double rdotr = 0;
for (size_t i=0; i<NumParams; ++i) {
p[i] = b[i];
r[i] = b[i];
rdotr += r[i] * r[i];
}
// Initialisation - FVP
// Unflatten b into VW / VB / VLogStd, in the same parameter order used when
// reading the FVP result back (weights, then bias, per layer; LogStd last).
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
VW[i][j*nextLayerDim+k] = b[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
VB[i][k] = b[pos];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
VLogStd[k] = b[pos];
pos++;
}
//////////////////// Load Simulation Data ////////////////////
// Open Data File that contains Mean, std and Observation
FILE *DataFilePointer = fopen(DataFile, "r");
if (DataFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", DataFile);
return -1;
}
// Read Mean, Std and Observation
// Remarks: Std is the same for all samples, and appears in every line in the data file
// so we are writing the same Std again and again to the same place.
for (size_t i=0; i<NumSamples; ++i) {
// Read Mean
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Mean[i*ActionSpaceDim+j]);
}
// Read Std
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Std[j]);
}
// Read Observation
for (size_t j=0; j<ObservSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Observ[i*ObservSpaceDim+j]);
}
}
// Close Data File
fclose(DataFilePointer);
//////////////////// FPGA - Initialisation ////////////////////
// Load Maxfile and Engine
fprintf(stderr, "[INFO] Initialising FPGA...\n");
max_file_t* maxfile = TRPO_init();
max_engine_t* engine = max_load(maxfile, "*");
fprintf(stderr, "[INFO] Loading Model and Simulation Data...\n");
// Length of Observation Vector
// Remarks: DRAM Write requires data bit-size to be a multiple of 384bytes
// Namely, the number of items must be a multiple of 48
size_t ObservVecLength = WeightInitVecLength + NumSamples*BlockDim[0];
size_t ObservVecWidth = NumBlocks[0];
size_t ActualObservVecItems = ObservVecLength * ObservVecWidth;
size_t PaddedObservVecItems = (size_t) 48 * ceil( (double)ActualObservVecItems/48 );
fprintf(stderr, "[INFO] Observation Vector (%zu bytes) padded to %zu bytes\n", ActualObservVecItems*8, PaddedObservVecItems*8);
double * Observation = (double *) calloc(PaddedObservVecItems, sizeof(double));
// Length of DataP Vector
// Remarks: DRAM Write requires data bit-size to be a multiple of 384bytes
// Namely, the number of items must be a multiple of 48
// DataP covers only the Weight+VWeight prefix of the DRAM image (rounded up to 48 items);
// per iteration we rewrite just that prefix, leaving the observation region from the
// initial Observation write intact (see the padding loop in the CG main loop).
size_t ActualDataPVecItems = WeightInitVecLength * NumBlocks[0];
size_t PaddedDataPVecItems = (size_t) 48 * ceil( (double)ActualDataPVecItems/48 );
fprintf(stderr, "[INFO] Vector P (%zu bytes) padded to %zu bytes\n", ActualDataPVecItems*8, PaddedDataPVecItems*8);
double * DataP = (double *) calloc(PaddedDataPVecItems, sizeof(double));
// Number of Ticks for each CG iteration
fprintf(stderr, "[INFO] In each iteration FPGA will run for %zu cycles.\n", NumTicks);
// Feed Weight and VWeight into Observation
// Weights are laid out block-wise: for each output-block Y, the rows of every
// input-block X are interleaved across the vector width (ObservVecWidth == NumBlocks[0]).
// Positions past the real layer sizes (padding) are explicitly zeroed.
size_t RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
// Parameters of current
size_t InBlockDim = BlockDim[ID];
size_t NumInBlocks = NumBlocks[ID];
size_t OutBlockDim = BlockDim[ID+1];
size_t NumOutBlocks = NumBlocks[ID+1];
size_t OutLayerSize = LayerSize[ID+1];
// Feed Weight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
Observation[RowNum*ObservVecWidth+X] = W[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
// Feed VWeight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (size_t X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
Observation[RowNum*ObservVecWidth+X] = VW[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
}
// Feed actual observation data into Observation
for (size_t iter=0; iter<NumSamples; ++iter) {
size_t InBlockDim = BlockDim[0];
size_t NumInBlocks = NumBlocks[0];
for (int addrX=0; addrX<InBlockDim; ++addrX) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[0];
if (RowNumPadded<RowNumLimit) Observation[RowNum*ObservVecWidth+X] = Observ[iter*ObservSpaceDim+RowNumPadded];
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
// Length of BiasStd Vector
size_t BiasStdVecLength = PaddedLayerSize[NumLayers-1];
for (size_t i=1; i<NumLayers; ++i) {
BiasStdVecLength += 2*PaddedLayerSize[i];
}
double * BiasStd = (double *) calloc(BiasStdVecLength, sizeof(double));
// Feed Bias and VBias into BiasStd
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
size_t nextLayerDim = PaddedLayerSize[ID+1];
size_t nextLayerDimLimit = LayerSize[ID+1];
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = B[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = VB[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
}
// Feed (1/Std)^2 into BiasStd
for (size_t k=0; k<PaddedLayerSize[NumLayers-1]; ++k) {
size_t LayerDimLimit = LayerSize[NumLayers-1];
if (k<LayerDimLimit) BiasStd[RowNum] = 1.0 / Std[k] / Std[k];
else BiasStd[RowNum] = 0;
RowNum++;
}
// Init FPGA
// One-time full DRAM image write: weights + VW prefix followed by all observations.
fprintf(stderr, "[INFO] Loading Model and Simulation Data...\n");
TRPO_WriteDRAM_actions_t init_action;
init_action.param_start_bytes = 0;
init_action.param_size_bytes = PaddedObservVecItems * sizeof(double);
init_action.instream_fromCPU = Observation;
TRPO_WriteDRAM_run(engine, &init_action);
//////////////////// CG - Main Loop ////////////////////
// Measuring Total Time and Total Computing Time
double runtimeComp = 0;
struct timeval tv1, tv2;
struct timeval tv3, tv4;
// Iterative Solver
gettimeofday(&tv3, NULL);
for (size_t iter=0; iter<=MaxIter; ++iter) {
// Calculate Frobenius Norm of x
double FrobNorm = 0;
gettimeofday(&tv1, NULL);
#pragma omp parallel for reduction (+:FrobNorm)
for (size_t i=0; i<NumParams; ++i) {
FrobNorm += x[i] * x[i];
}
FrobNorm = sqrt(FrobNorm);
gettimeofday(&tv2, NULL);
runtimeComp += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
printf("CG Iter[%zu] Residual Norm=%.12e, Soln Norm=%.12e\n", iter, rdotr, FrobNorm);
// Check Termination Condition
if (rdotr<ResidualTh || iter==MaxIter) {
for (size_t i=0; i<NumParams; ++i) Result[i] = x[i];
break;
}
//////////////////// FPGA - Load p ////////////////////
// Read p into VW, VB and VLogStd
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
VW[i][j*nextLayerDim+k] = p[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
VB[i][k] = p[pos];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
VLogStd[k] = p[pos];
pos++;
}
// Feed VW, VB and VLogStd into DataP
// Same block-wise layout as the initial Observation write above (W then VW per layer).
size_t RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
// Parameters of current
size_t InBlockDim = BlockDim[ID];
size_t NumInBlocks = NumBlocks[ID];
size_t OutBlockDim = BlockDim[ID+1];
size_t NumOutBlocks = NumBlocks[ID+1];
size_t OutLayerSize = LayerSize[ID+1];
// Feed Weight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
DataP[RowNum*ObservVecWidth+X] = W[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else DataP[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
// Feed VWeight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (size_t X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
DataP[RowNum*ObservVecWidth+X] = VW[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else DataP[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
}
// Pad actual observation data into DataP
// Because the DRAM write below covers PaddedDataPVecItems items starting at byte 0,
// the 48-item rounding may spill a few rows into the observation region; re-fill that
// overlap with the same observation values so the DRAM image stays consistent.
// NOTE(review): the loop variable "iter" here shadows the CG iteration counter "iter"
// of the enclosing loop - intentional but easy to misread.
bool isPadding = true;
for (size_t iter=0; iter<NumSamples && isPadding; ++iter) {
size_t InBlockDim = BlockDim[0];
size_t NumInBlocks = NumBlocks[0];
for (int addrX=0; addrX<InBlockDim && isPadding; ++addrX) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[0];
size_t posDataP = RowNum*ObservVecWidth+X;
if (posDataP<PaddedDataPVecItems) {
if (RowNumPadded<RowNumLimit) DataP[posDataP] = Observ[iter*ObservSpaceDim+RowNumPadded];
else DataP[posDataP] = 0;
}
else {
isPadding = false;
break;
}
}
RowNum++;
}
}
// Feed Bias and VBias into BiasStd
// Bias values are unchanged across iterations; only VB (from p) differs. The trailing
// (1/Std)^2 section written before the loop is left untouched.
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
size_t nextLayerDim = PaddedLayerSize[ID+1];
size_t nextLayerDimLimit = LayerSize[ID+1];
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = B[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = VB[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
}
// Feed DataP to BRAM
// Partial DRAM rewrite: only the W/VW prefix (PaddedDataPVecItems items) is updated;
// the run action below still consumes the full PaddedObservVecItems-item image.
TRPO_WriteDRAM_actions_t write_action;
write_action.param_start_bytes = 0;
write_action.param_size_bytes = PaddedDataPVecItems * sizeof(double);
write_action.instream_fromCPU = DataP;
TRPO_WriteDRAM_run(engine, &write_action);
//////////////////// FPGA - Calc z = FIM*p ////////////////////
// Init Advanced Static Interface
TRPO_Run_actions_t run_action;
run_action.param_NumSamples = NumSamples;
run_action.param_PaddedObservVecItems = PaddedObservVecItems;
run_action.instream_BiasStd = BiasStd;
run_action.outstream_FVP = FVPResult;
// Run DFE and Measure Elapsed Time
gettimeofday(&tv1, NULL);
TRPO_Run_run(engine, &run_action);
gettimeofday(&tv2, NULL);
runtimeComp += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
// Read FVP into z
// The FPGA result is padded to PaddedLayerSize; strip the padding while unflattening.
pos = 0;
size_t FVPPos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerSizePadded = PaddedLayerSize[i];
size_t nextLayerSizePadded = PaddedLayerSize[i+1];
size_t curLayerSizeReal = LayerSize[i];
size_t nextLayerSizeReal = LayerSize[i+1];
for (size_t j=0; j<curLayerSizePadded; ++j) {
for (size_t k=0; k<nextLayerSizePadded; ++k) {
if ( (j<curLayerSizeReal) && (k<nextLayerSizeReal) ) {
z[pos] = FVPResult[FVPPos];
pos++;
}
FVPPos++;
}
}
for (size_t k=0; k<nextLayerSizePadded; ++k) {
if (k<nextLayerSizeReal) {
z[pos] = FVPResult[FVPPos];
pos++;
}
FVPPos++;
}
}
// LogStd entries of the FVP are computed on the CPU: 2*NumSamples*VLogStd[k]
for (size_t k=0; k<ActionSpaceDim; ++k) {
z[pos] = 2 * NumSamples * VLogStd[k];
pos++;
}
gettimeofday(&tv1, NULL);
// Averaging Fisher Vector Product over the samples and apply CG Damping
#pragma omp parallel for
for (size_t i=0; i<pos; ++i) {
z[i] = z[i] / (double)NumSamples;
z[i] += CG_Damping * p[i];
}
//////////////////// FPGA - End ////////////////////
// Update x and r
// Standard CG step: v = rdotr / (p.z), x += v*p, r -= v*z
double pdotz = 0;
#pragma omp parallel for reduction (+:pdotz)
for (size_t i=0; i<NumParams; ++i) {
pdotz += p[i] * z[i];
}
double v = rdotr / pdotz;
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
x[i] += v * p[i];
r[i] -= v * z[i];
}
// Update p
// mu = newrdotr / rdotr (Fletcher-Reeves style direction update)
double newrdotr = 0;
#pragma omp parallel for reduction (+:newrdotr)
for (size_t i=0; i<NumParams; ++i) {
newrdotr += r[i] * r[i];
}
double mu = newrdotr / rdotr;
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
p[i] = r[i] + mu * p[i];
}
// Update rdotr
rdotr = newrdotr;
gettimeofday(&tv2, NULL);
runtimeComp += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
}
gettimeofday(&tv4, NULL);
double runtimeTotal = ((tv4.tv_sec-tv3.tv_sec) * (double)1E6 + (tv4.tv_usec-tv3.tv_usec)) / (double)1E6;
fprintf(stderr, "[INFO] Total Time for FPGA is %f seconds. Pure Computing Time is %f seconds.\n", runtimeTotal, runtimeComp);
//////////////////// Clean Up ////////////////////
fprintf(stderr, "[INFO] Clean up...\n");
// Free Engine and Maxfile
max_unload(engine);
TRPO_free();
// Free Memories Allocated for Reading Files
for (size_t i=0; i<NumLayers-1; ++i) {
free(W[i]); free(VW[i]);
free(B[i]); free(VB[i]);
}
free(Observ); free(Mean); free(Std); free(VLogStd);
// Free Memories Allocated for DFE
free(Observation); free(BiasStd); free(FVPResult);
// Free Memories Allocated for CG
// NOTE(review): BlockDim is never freed - small one-shot leak; confirm intent.
free(p); free(r); free(x); free(z); free(DataP);
return runtimeComp;
}
void SwimmerTest(size_t NumThreads)
{
    // Regression test for the Fisher Vector Product computation on the
    // Swimmer-v1 policy network (8-64-64-2 MLP, tanh hidden layers, linear output).
    // Reads (input, expected-FVP) pairs from FVPFileName, runs FVPFast, and
    // reports the mean absolute percentage error across all parameters.
    // NumThreads: number of worker threads forwarded to FVPFast.

    // Swimmer-v1 configuration ('l' = linear, 't' = tanh activation)
    char AcFunc [] = {'l', 't', 't', 'l'};
    size_t LayerSize [] = {8, 64, 64, 2};
    char * ModelFileName = "SwimmerTestModel.txt";
    char * DataFileName = "SwimmerTestData.txt";
    char * FVPFileName = "SwimmerTestFVP.txt";
    TRPOparam Param;
    Param.ModelFile = ModelFileName;
    Param.DataFile = DataFileName;
    Param.NumLayers = 4;
    Param.AcFunc = AcFunc;
    Param.LayerSize = LayerSize;
    Param.NumSamples = 26000;
    Param.CG_Damping = 0.1;

    // Open Simulation Data File that contains test data
    FILE *DataFilePointer = fopen(FVPFileName, "r");
    if (DataFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", FVPFileName);
        return;
    }

    // Memory Allocation - abort early on OOM instead of dereferencing NULL
    size_t NumParams = NumParamsCalc(Param.LayerSize, Param.NumLayers);
    double * input  = (double *) calloc(NumParams, sizeof(double));
    double * result = (double *) calloc(NumParams, sizeof(double));
    double * expect = (double *) calloc(NumParams, sizeof(double));
    if (input==NULL || result==NULL || expect==NULL) {
        fprintf(stderr, "[ERROR] Memory allocation failed.\n");
        fclose(DataFilePointer);
        free(input); free(result); free(expect);
        return;
    }

    // Read Input and Expect, verifying every conversion succeeds so a truncated
    // or malformed file cannot silently feed zeros into the comparison below.
    for (size_t i=0; i<NumParams; ++i) {
        if (fscanf(DataFilePointer, "%lf %lf", &input[i], &expect[i]) != 2) {
            fprintf(stderr, "[ERROR] Data File [%s] is malformed or truncated.\n", FVPFileName);
            fclose(DataFilePointer);
            free(input); free(result); free(expect);
            return;
        }
    }
    fclose(DataFilePointer);

    double FVPStatus = FVPFast(Param, result, input, NumThreads);
    if (FVPStatus<0) fprintf(stderr, "[ERROR] Fisher Vector Product Calculation Failed.\n");

    // Check Result
    // Relative error is undefined when expect[i]==0, so those entries are skipped
    // entirely. (Previously the division was performed before the zero check,
    // yielding inf/NaN that the cur_err>1 report printed as spurious mismatches.)
    double percentage_err = 0;
    for (size_t i=0; i<NumParams; ++i) {
        if (expect[i] != 0) {
            double cur_err = fabs( (result[i]-expect[i])/expect[i] ) * 100;
            percentage_err += cur_err;
            if (cur_err>1) printf("FVP[%zu]=%e, Expect=%e. %.4f%% Difference\n", i, result[i], expect[i], cur_err);
        }
    }
    // Note: the average keeps the original convention of dividing by NumParams
    // (all parameters), not by the number of nonzero expectations.
    percentage_err = percentage_err / (double)NumParams;
    printf("--------------------- Swimmer Test (%zu Threads) ----------------------\n", NumThreads);
    printf("[INFO] Fisher Vector Product Mean Absolute Percentage Error = %.12f%%\n", percentage_err);
    printf("---------------------------------------------------------------------\n\n");

    // Clean Up
    free(input); free(result); free(expect);
    return;
}
void SwimmerCGTest(size_t NumThreads)
{
    // Regression test for the Conjugate Gradient solver on the Swimmer-v1
    // policy network (8-64-64-2 MLP). Reads (input, expected-solution) pairs
    // from CGFileName, runs CG for up to 10 iterations (threshold 1e-10, as in
    // modular_rl), and reports the mean absolute percentage error.
    // NumThreads: number of worker threads forwarded to CG.

    // Swimmer-v1 configuration ('l' = linear, 't' = tanh activation)
    char AcFunc [] = {'l', 't', 't', 'l'};
    size_t LayerSize [] = {8, 64, 64, 2};
    char * ModelFileName = "SwimmerTestModel.txt";
    char * DataFileName = "SwimmerTestData.txt";
    char * CGFileName = "SwimmerTestCG.txt";
    TRPOparam Param;
    Param.ModelFile = ModelFileName;
    Param.DataFile = DataFileName;
    Param.NumLayers = 4;
    Param.AcFunc = AcFunc;
    Param.LayerSize = LayerSize;
    Param.NumSamples = 26000;
    Param.CG_Damping = 0.1;

    // Open Simulation Data File that contains test data
    FILE *DataFilePointer = fopen(CGFileName, "r");
    if (DataFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", CGFileName);
        return;
    }

    // Memory Allocation - abort early on OOM instead of dereferencing NULL
    size_t NumParams = NumParamsCalc(Param.LayerSize, Param.NumLayers);
    double * input  = (double *) calloc(NumParams, sizeof(double));
    double * result = (double *) calloc(NumParams, sizeof(double));
    double * expect = (double *) calloc(NumParams, sizeof(double));
    if (input==NULL || result==NULL || expect==NULL) {
        fprintf(stderr, "[ERROR] Memory allocation failed.\n");
        fclose(DataFilePointer);
        free(input); free(result); free(expect);
        return;
    }

    // Read Input and Expect, verifying every conversion succeeds so a truncated
    // or malformed file cannot silently feed zeros into the comparison below.
    for (size_t i=0; i<NumParams; ++i) {
        if (fscanf(DataFilePointer, "%lf %lf", &input[i], &expect[i]) != 2) {
            fprintf(stderr, "[ERROR] Data File [%s] is malformed or truncated.\n", CGFileName);
            fclose(DataFilePointer);
            free(input); free(result); free(expect);
            return;
        }
    }
    fclose(DataFilePointer);

    printf("----------------------- Swimmer CG Test (%zu Threads) ------------------------\n", NumThreads);
    double compTime = CG(Param, result, input, 10, 1e-10, NumThreads);
    if (compTime<0) fprintf(stderr, "[ERROR] Conjugate Gradient Calculation Failed.\n");

    // Check Result
    // Relative error is undefined when expect[i]==0, so those entries are skipped
    // entirely. (Previously the division was performed before the zero check,
    // yielding inf/NaN that the cur_err>1 report printed as spurious mismatches.)
    double percentage_err = 0;
    for (size_t i=0; i<NumParams; ++i) {
        if (expect[i] != 0) {
            double cur_err = fabs( (result[i]-expect[i])/expect[i] ) * 100;
            percentage_err += cur_err;
            if (cur_err>1) printf("CG[%zu]=%e, Expect=%e. %.4f%% Difference\n", i, result[i], expect[i], cur_err);
        }
    }
    // Note: the average keeps the original convention of dividing by NumParams
    // (all parameters), not by the number of nonzero expectations.
    percentage_err = percentage_err / (double)NumParams;
    printf("\n[INFO] CPU Computing Time = %f seconds\n", compTime);
    printf("[INFO] Conjugate Gradient Mean Absolute Percentage Error = %.4f%%\n", percentage_err);
    printf("---------------------------------------------------------------------\n\n");

    // Clean Up
    free(input); free(result); free(expect);
    return;
}
void Test_FVP_FPGA(void) {
    // Compute the Fisher Vector Product on both CPU (FVP) and FPGA (FVP_FPGA)
    // for the active model below, compare the two outputs element-wise, dump
    // both vectors to result.txt, and print the mean absolute percentage error.

    /*
    // Swimmer-v1
    char AcFunc [] = {'l', 't', 't', 'l'};
    size_t LayerSize [] = { 8, 64, 64, 2};
    size_t PaddedLayerSize [] = { 32, 64, 64, 8};
    size_t NumBlocks [] = { 4, 4, 4, 4};
    char * ModelFileName = "SwimmerTestModel.txt";
    char * DataFileName = "SwimmerTestData.txt";
    char * FVPFileName = "SwimmerTestFVP.txt";
    */
    /*
    // Ant-v1
    char AcFunc [] = {'l', 't', 't', 'l'};
    size_t LayerSize [] = {111, 64, 32, 8};
    size_t PaddedLayerSize [] = {120, 64, 35, 8};
    size_t NumBlocks [] = { 24, 8, 7, 8};
    char * ModelFileName = "AntTestModel.txt";
    char * DataFileName = "AntTestData.txt";
    char * FVPFileName = "AntTestFVP.txt";
    */
    // Humanoid-v1 (active configuration)
    char AcFunc [] = {'l', 't', 't', 'l'};
    size_t LayerSize [] = {376,128, 64,17};
    size_t PaddedLayerSize [] = {384,128, 66,18};
    size_t NumBlocks [] = { 32, 8, 6, 6};
    char * ModelFileName = "HumanoidTestModel.txt";
    char * DataFileName = "HumanoidTestData.txt";
    char * FVPFileName = "HumanoidTestFVP.txt";

    TRPOparam Param;
    Param.ModelFile = ModelFileName;
    Param.DataFile = DataFileName;
    Param.NumLayers = 4;
    Param.AcFunc = AcFunc;
    Param.LayerSize = LayerSize;
    Param.PaddedLayerSize = PaddedLayerSize;
    Param.NumBlocks = NumBlocks;
    Param.NumSamples = 100;
    Param.CG_Damping = 0.1;

    // Open Simulation Data File that contains test data
    FILE *DataFilePointer = fopen(FVPFileName, "r");
    if (DataFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", FVPFileName);
        return;
    }

    // Memory Allocation — calloc zero-initializes and detects n*size overflow
    size_t NumParams = NumParamsCalc(Param.LayerSize, Param.NumLayers);
    double * input       = (double *) calloc(NumParams, sizeof(double));
    double * CPU_output  = (double *) calloc(NumParams, sizeof(double));
    double * FPGA_output = (double *) calloc(NumParams, sizeof(double));
    if (input==NULL || CPU_output==NULL || FPGA_output==NULL) {
        fprintf(stderr, "[ERROR] Memory allocation failed.\n");
        fclose(DataFilePointer);
        free(input); free(CPU_output); free(FPGA_output);
        return;
    }

    // Read Input (the second column of the file is the reference output,
    // unused here — both CPU and FPGA results are recomputed below)
    for (size_t i=0; i<NumParams; ++i) {
        double temp;
        if (fscanf(DataFilePointer, "%lf %lf", &input[i], &temp) != 2) {
            fprintf(stderr, "[ERROR] Data File [%s] is malformed or truncated.\n", FVPFileName);
            fclose(DataFilePointer);
            free(input); free(CPU_output); free(FPGA_output);
            return;
        }
    }
    fclose(DataFilePointer);

    //////////////////// CPU ////////////////////
    int FVPStatus = FVP(Param, CPU_output, input);
    if (FVPStatus!=0) fprintf(stderr, "[ERROR] Fisher Vector Product Calculation Failed.\n");

    //////////////////// FPGA ////////////////////
    double runtimeFPGA = FVP_FPGA(Param, FPGA_output, input);
    if (runtimeFPGA<0) fprintf(stderr, "[ERROR] FPGA Fisher Vector Product Calculation Failed.\n");

    //////////////////// Check Results ////////////////////
    // Skip entries whose CPU reference is zero: the original divided by
    // CPU_output[i] before the zero guard, yielding inf/nan diagnostics.
    double percentage_err = 0;
    for (size_t i=0; i<NumParams; ++i) {
        if (CPU_output[i] == 0) continue;
        double cur_err = fabs( (FPGA_output[i]-CPU_output[i])/CPU_output[i] ) * 100;
        percentage_err += cur_err;
        if (cur_err>1) {
            printf("FVP_FPGA[%zu]=%e, FVP_CPU[%zu]=%e. %.12f%% Difference\n", i, FPGA_output[i], i, CPU_output[i], cur_err);
        }
    }

    // Print Results — only write when the output file actually opened
    // (the original kept fprintf-ing through a NULL FILE*, which is UB)
    FILE *ResultFilePointer = fopen("result.txt", "w");
    if (ResultFilePointer == NULL) {
        fprintf(stderr, "[ERROR] Open Output File Failed.\n");
    } else {
        for (size_t i=0; i<NumParams; ++i) {
            fprintf(ResultFilePointer, "CPU_output[%4zu] = % 014.12f, FPGA_output[%4zu] = % 014.12f\n", i, CPU_output[i], i, FPGA_output[i]);
        }
        fclose(ResultFilePointer);
    }

    percentage_err = percentage_err / (double)NumParams;
    printf("--------------------------- Test FPGA ---------------------------\n");
    printf("[INFO] FPGA Computing Time = %f seconds\n", runtimeFPGA);
    printf("[INFO] Mean Absolute Percentage Error = %.12f%%\n", percentage_err);
    printf("---------------------------------------------------------------------\n\n");

    // Clean Up
    free(input); free(CPU_output); free(FPGA_output);
    return;
}
void Test_CG_FPGA(size_t NumThreads)
{
    // Run Conjugate Gradient on both FPGA (CG_FPGA) and CPU (CG) for the
    // active model below, compare the two solutions element-wise, dump both
    // vectors to result.txt, and report mean and max percentage error.
    //
    // NumThreads: number of worker threads passed to both solvers.

    /*
    // Swimmer-v1
    char AcFunc [] = {'l', 't', 't', 'l'};
    size_t LayerSize [] = { 8, 64, 64, 2};
    size_t PaddedLayerSize [] = { 32, 64, 64, 8};
    size_t NumBlocks [] = { 4, 4, 4, 4};
    char * ModelFileName = "SwimmerTestModel.txt";
    char * DataFileName = "SwimmerTestData.txt";
    char * CGFileName = "SwimmerTestCG.txt";
    */
    /*
    // Ant-v1
    char AcFunc [] = {'l', 't', 't', 'l'};
    size_t LayerSize [] = {111, 64, 32, 8};
    size_t PaddedLayerSize [] = {120, 64, 35, 8};
    size_t NumBlocks [] = { 24, 8, 7, 8};
    char * ModelFileName = "AntTestModel.txt";
    char * DataFileName = "AntTestData.txt";
    char * CGFileName = "AntTestCG.txt";
    */
    // Humanoid-v1 (active configuration)
    char AcFunc [] = {'l', 't', 't', 'l'};
    size_t LayerSize [] = {376,128, 64,17};
    size_t PaddedLayerSize [] = {384,128, 66,18};
    size_t NumBlocks [] = { 32, 8, 6, 6};
    char * ModelFileName = "HumanoidTestModel.txt";
    char * DataFileName = "HumanoidTestData.txt";
    char * CGFileName = "HumanoidTestCG.txt";

    TRPOparam Param;
    Param.ModelFile = ModelFileName;
    Param.DataFile = DataFileName;
    Param.NumLayers = 4;
    Param.AcFunc = AcFunc;
    Param.LayerSize = LayerSize;
    Param.PaddedLayerSize = PaddedLayerSize;
    Param.NumBlocks = NumBlocks;
    Param.NumSamples = 50000;
    Param.CG_Damping = 0.1;

    // Open Simulation Data File that contains test data
    FILE *DataFilePointer = fopen(CGFileName, "r");
    if (DataFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", CGFileName);
        return;
    }

    // Memory Allocation — calloc zero-initializes and detects n*size overflow
    size_t NumParams = NumParamsCalc(Param.LayerSize, Param.NumLayers);
    double * input       = (double *) calloc(NumParams, sizeof(double));
    double * CPU_output  = (double *) calloc(NumParams, sizeof(double));
    double * FPGA_output = (double *) calloc(NumParams, sizeof(double));
    if (input==NULL || CPU_output==NULL || FPGA_output==NULL) {
        fprintf(stderr, "[ERROR] Memory allocation failed.\n");
        fclose(DataFilePointer);
        free(input); free(CPU_output); free(FPGA_output);
        return;
    }

    // Read Input (second column is a reference value, discarded here)
    double placeholder;
    for (size_t i=0; i<NumParams; ++i) {
        if (fscanf(DataFilePointer, "%lf %lf", &input[i], &placeholder) != 2) {
            fprintf(stderr, "[ERROR] Data File [%s] is malformed or truncated.\n", CGFileName);
            fclose(DataFilePointer);
            free(input); free(CPU_output); free(FPGA_output);
            return;
        }
    }
    fclose(DataFilePointer);

    // FPGA-based CG Calculation
    printf("\n---------------------- CG Test FPGA (%zu Threads) -----------------------\n", NumThreads);
    double runtimeFPGA = CG_FPGA(Param, FPGA_output, input, 10, 1e-10, NumThreads);
    if (runtimeFPGA<0) fprintf(stderr, "[ERROR] FPGA-based Conjugate Gradient Calculation Failed.\n");

    // CPU-based CG Calculation
    printf("---------------------- CG Test CPU (%zu Threads) -----------------------\n", NumThreads);
    double runtimeCPU = CG(Param, CPU_output, input, 10, 1e-10, NumThreads);
    if (runtimeCPU<0) fprintf(stderr, "[ERROR] CPU-based Conjugate Gradient Calculation Failed.\n");

    // Check Result. Entries whose CPU reference is zero are skipped entirely,
    // so the relative error is never computed with a zero denominator
    // (the original performed the division before the zero guard).
    double percentage_err = 0;
    double max_percentage_err = 0;
    for (size_t i=0; i<NumParams; ++i) {
        if (CPU_output[i] == 0) continue;
        double cur_err = fabs( (FPGA_output[i]-CPU_output[i])/CPU_output[i] ) * 100.0;
        percentage_err += cur_err;
        max_percentage_err = (max_percentage_err > cur_err) ? max_percentage_err : cur_err;
        // if (cur_err>1) printf("CG_FPGA[%zu]=%e, CG_CPU[%zu]=%e. %.4f%% Difference\n", i, FPGA_output[i], i, CPU_output[i], cur_err);
    }

    // Print Results — only write when the output file actually opened
    // (the original kept fprintf-ing through a NULL FILE*, which is UB)
    FILE *ResultFilePointer = fopen("result.txt", "w");
    if (ResultFilePointer == NULL) {
        fprintf(stderr, "[ERROR] Open Output File Failed.\n");
    } else {
        for (size_t i=0; i<NumParams; ++i) {
            fprintf(ResultFilePointer, "%.12f %.12f\n", CPU_output[i], FPGA_output[i]);
        }
        fclose(ResultFilePointer);
    }

    percentage_err = percentage_err / (double)NumParams;
    printf("\n-------------------------- CG Result Check --------------------------\n");
    printf("[INFO] FPGA Time = %f seconds, CPU Time = %f seconds\n", runtimeFPGA, runtimeCPU);
    printf("[INFO] Mean Absolute Percentage Error = %.12f%%, Max Percentage Error = %.12f%%\n", percentage_err, max_percentage_err);
    printf("---------------------------------------------------------------------\n\n");

    // Clean Up
    free(input); free(CPU_output); free(FPGA_output);
    return;
}
int main(void)
{
    // Test driver. Exactly one test is enabled at a time; uncomment the
    // desired line to switch tests. The integer argument is the thread count.
    // (Declared as main(void): an empty parameter list is a non-prototype
    // declarator in C17 and earlier.)

    //////////////////// Fisher Vector Product Computation ////////////////////
    // SimpleTest();
    // PendulumTest(6);
    // SwimmerTest(6);
    // SwimmerCGTest(6);

    //////////////////// FPGA ////////////////////
    Test_FVP_FPGA();
    // Test_CG_FPGA(6);

    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.