source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unop__identity_uint32_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint32_int8
// op(A') function: GB_unop_tran__identity_uint32_int8
// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint32_int8
(
    uint32_t *Cx,       // output array; Cx and Ax may be aliased
    const int8_t *Ax,   // input array
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    // each entry is independent: typecast int8_t -> uint32_t in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (uint32_t) Ax [k] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast each entry, and apply the unary
// operator.  The actual kernel body comes from the GB_unop_transpose.c
// template, specialized by the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__identity_uint32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// phase 2 of 2: Rowcounts was computed by a prior phase-1 pass
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
meta_when_default.c | int main()
{
// n selects which metadirective variant is active at run time
int n = 10;
/* OpenMP 5.0 metadirective: when the user condition (n<5) holds, the 'when'
 * clause selects no directive (nothing follows the ':'); otherwise the
 * default variant 'parallel for' is applied to the loop below. */
#pragma omp metadirective when(user={condition(n<5)}:) default(parallel for)
for(int i=0; i<n; i++)
// empty loop body: this file only exercises directive selection
;
return 0;
}
|
par_lr_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#define MAX_C_CONNECTIONS 100
#define HAVE_COMMON_C 1
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildStdInterp
* Comment: The interpolatory weighting can be changed with the sep_weight
* variable. This can enable not separating negative and positive
* off diagonals in the weight formula.
*--------------------------------------------------------------------------*/
/* Build the "standard" interpolation operator P for BoomerAMG.
 *
 * Parameters:
 *   A               - fine-grid matrix (ParCSR)
 *   CF_marker       - per-row C/F splitting (>= 0: C-point, -1: F-point,
 *                     -3: special F-point excluded from interpolation)
 *   S               - strength-of-connection matrix
 *   num_cpts_global - global coarse-point partition (first/last C-point)
 *   num_functions   - number of functions (systems AMG); dof_func maps rows
 *   debug_flag      - 4 enables wall-clock timing printouts
 *   trunc_factor    - drop tolerance for P truncation (0.0 disables)
 *   max_elmts       - max nonzeros kept per row of P (0 disables)
 *   sep_weight      - 1: separate positive/negative weights in the formula
 *   P_ptr           - output: the interpolation matrix P
 *
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int sep_weight,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
/* [col_1, col_n) is this rank's global column (row) range */
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
//HYPRE_BigInt *found;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* ihat maps a column to its slot in ahat/ipnt; ipnt is the inverse map */
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
/* decremented per F-row so each row gets a unique strong-F marker value */
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
HYPRE_Real *ahat = NULL;
HYPRE_Real *ahat_offd = NULL;
HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
HYPRE_Real diagonal, distribute;
HYPRE_Real alfa = 1.;
HYPRE_Real beta = 1.;
/* Loop variables */
// HYPRE_Int index;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, j1, jj, kk, k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Real wall_1 = 0;
HYPRE_Real wall_2 = 0;
HYPRE_Real wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
my_first_cpt = num_cpts_global[0];
/* only the last rank knows the global total; broadcast it to everyone */
if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
 * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 0);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
 * First Pass: Determine size of P and fill in fine_to_coarse mapping.
 *-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
 * Loop over fine grid: count nonzeros per row of P (structure only).
 *-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
{
P_offd_i[i] = jj_counter_offd;
}
if (CF_marker[i] >= 0)
{
/* C-point: single entry (identity), and assign its coarse index */
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i, or C-points that strongly influence F-points
 * that strongly influence i.
 *--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
/* i1 is a C point */
/* P_marker[i1] < P_diag_i[i] means not yet counted for row i */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
/* i1 is a F point, loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
/* F point; look at neighbors of i1. Sop contains global col
 * numbers and entries that could be in S_diag or S_offd or
 * neither. */
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] >= 0)
{
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
/* negative entries encode local off-proc column as -(loc+1) */
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (CF_marker_offd[loc_col] >= 0)
{
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Allocate arrays.
 *-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
/* Initialize ahat, which is a modification to a, used in the standard
 * interpolation routine. */
if (n_fine)
{
ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
 * Second Pass: loop over fine grid points and fill in P's structure
 * (column indices) and interpolation weights.
 *-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if (num_procs > 1)
{
jj_begin_row_offd = jj_counter_offd;
}
/*--------------------------------------------------------------------
 * If i is a c-point, interpolation is the identity.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, build interpolation.
 *--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
/* fresh marker value for this row's strongly-influencing F-points */
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_diag_j
 * and initialize interpolation weight to zero.
 *--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
/* tag i1 as a strong F-neighbor; its C-neighbors join row i */
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* on-processor column */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] >= 0)
{
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (CF_marker_offd[loc_col] >= 0)
{
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd] = loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds();
}
/* Build the compressed row ahat: slots [0, cnt_c) hold C-point sums,
 * slots from (jj_end_row - jj_begin_row) on hold the diagonal and
 * F-point sums; ihat/ipnt map columns <-> slots. */
cnt_c = 0;
cnt_f = jj_end_row - jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd - jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
/* the diagonal entry a_ii seeds slot cnt_f */
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
{
/* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
{
ahat[indx] += A_diag_data[jj];
}
else if (P_marker[i1] >= jj_begin_row)
{
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
/* i1 is a strong F-neighbor: distribute a_{i,i1} over i1's row,
 * scaled by its diagonal (standard interpolation formula) */
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj] / A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1] + 1; kk < A_diag_i[i1 + 1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
{
ahat[indx] -= A_diag_data[kk] * distribute;
}
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk] * distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk] * distribute;
}
}
if (num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1 + 1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if (num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
{
ahat_offd[indx] -= A_offd_data[kk] * distribute;
}
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk] * distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk] * distribute;
}
}
}
}
}
}
}
if (num_procs > 1)
{
/* same accumulation/distribution for off-processor neighbors of i,
 * using A_ext rows (indexed by big_k1 global columns) */
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
{
ahat_offd[indx] += A_offd_data[jj];
}
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
distribute = A_offd_data[jj] / A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1] + 1; kk < A_ext_i[i1 + 1]; kk++)
{
big_k1 = A_ext_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/*diag*/
loc_col = (HYPRE_Int)(big_k1 - col_1);
indx = ihat[loc_col];
if (indx > -1)
{
ahat[indx] -= A_ext_data[kk] * distribute;
}
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk] * distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk] * distribute;
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (num_functions == 1 || dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
{
ahat_offd[indx] -= A_ext_data[kk] * distribute;
}
else if (P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk] * distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk] * distribute;
}
}
}
}
}
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
/* after accumulation, cnt_c has advanced to the slot where the
 * diagonal was seeded (jj_end_row - jj_begin_row); extract and zero it */
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if (sep_weight == 1)
{
/* separate-weight formula: positive and negative entries are
 * scaled independently (alfa for negative, beta for positive) */
for (jj = 0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if (num_procs > 1)
{
for (jj = 0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
/* start at cnt_c + 1 to skip the (already zeroed) diagonal slot */
for (jj = cnt_c + 1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if (num_procs > 1)
{
for (jj = cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
/* guard against division by zero before forming the scalings */
if (sum_neg_C * diagonal != 0)
{
alfa = sum_neg / sum_neg_C / diagonal;
}
if (sum_pos_C * diagonal != 0)
{
beta = sum_pos / sum_pos_C / diagonal;
}
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
{
P_diag_data[jj] = -beta * ahat[j1];
}
else
{
P_diag_data[jj] = -alfa * ahat[j1];
}
/* convert local fine column index to coarse index */
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
/* reset ihat for the columns touched by this row */
for (jj = 0; jj < cnt_f; jj++)
{
ihat[ipnt[jj]] = -1;
}
if (num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
{
P_offd_data[jj] = -beta * ahat_offd[j1];
}
else
{
P_offd_data[jj] = -alfa * ahat_offd[j1];
}
ahat_offd[j1] = 0;
}
for (jj = 0; jj < cnt_f_offd; jj++)
{
ihat_offd[ipnt_offd[jj]] = -1;
}
}
}
else
{
/* combined-weight formula: one scaling alfa for all entries */
for (jj = 0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if (num_procs > 1)
{
for (jj = 0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj = cnt_c + 1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if (num_procs > 1)
{
for (jj = cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C * diagonal != 0)
{
alfa = sum / sum_C / diagonal;
}
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa * ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj = 0; jj < cnt_f; jj++)
{
ihat[ipnt[jj]] = -1;
}
if (num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa * ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj = 0; jj < cnt_f_offd; jj++)
{
ihat_offd[ipnt_offd[jj]] = -1;
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag == 4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
/* Assemble the ParCSR matrix P from the arrays built above */
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* truncation may reallocate; refresh local views */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
 * global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
/* restore the special -3 F-points to ordinary F-points (-1) */
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] == -3)
{
CF_marker[i] = -1;
}
}
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(ahat, HYPRE_MEMORY_HOST);
hypre_TFree(ihat, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt, HYPRE_MEMORY_HOST);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterpHost(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/* Threading variables */
HYPRE_Int my_thread_num, num_threads, start, stop;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
HYPRE_Int * diag_offset;
HYPRE_Int * fine_to_coarse_offset;
HYPRE_Int * offd_offset;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/* This function is smart enough to check P_marker and P_marker_offd only,
* and set them if they are not NULL. The other vectors are set regardless.*/
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
/*-----------------------------------------------------------------------
* Initialize threading variables
*-----------------------------------------------------------------------*/
max_num_threads[0] = hypre_NumThreads();
diag_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
offd_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
for (i = 0; i < max_num_threads[0]; i++)
{
diag_offset[i] = 0;
fine_to_coarse_offset[i] = 0;
offd_offset[i] = 0;
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker, big_k1)
#endif
{
/* Parallelize by computing only over each thread's range of rows.
*
* The first large for loop computes ~locally~ for each thread P_diag_i,
* P_offd_i and fine_to_coarse. Then, the arrays are stitched together
       * For example the first phase would compute
* P_diag_i = [0, 2, 4, 7, 2, 5, 6]
* for two threads. P_diag_i[stop] points to the end of that
* thread's data, but P_diag_i[start] points to the end of the
* previous thread's row range. This is then stitched together at the
* end to yield,
* P_diag_i = [0, 2, 4, 7, 9, 14, 15].
*
* The second large for loop computes interpolation weights and is
* relatively straight-forward to thread.
*/
/* initialize thread-wise variables */
strong_f_marker = -2;
coarse_counter = 0;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (n_fine)
{
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{ P_marker[i] = -1; }
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
for (i = 0; i < full_off_procNodes; i++)
{ P_marker_offd[i] = -1;}
}
/* this thread's row range */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
start = (n_fine / num_threads) * my_thread_num;
if (my_thread_num == num_threads - 1)
{ stop = n_fine; }
else
{ stop = (n_fine / num_threads) * (my_thread_num + 1); }
/* loop over rows */
/* This loop counts the number of elements in P */
      /* is done by counting the elements in the index set C-hat */
for (i = start; i < stop; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
{
P_offd_i[i] = jj_counter_offd;
}
if (CF_marker[i] >= 0)
{
/* row in P corresponding to a coarse pt., will only require one element (1 on the diagonal). */
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
          * strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
/* i1 is a C point */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
                  /* i1 is an F point, loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
/* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* End loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
P_diag_i[stop] = jj_counter;
P_offd_i[stop] = jj_counter_offd;
fine_to_coarse_offset[my_thread_num] = coarse_counter;
diag_offset[my_thread_num] = jj_counter;
offd_offset[my_thread_num] = jj_counter_offd;
/* Stitch P_diag_i, P_offd_i and fine_to_coarse together */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
/* Calculate the offset for P_diag_i and P_offd_i for each thread */
for (i = 1; i < num_threads; i++)
{
diag_offset[i] = diag_offset[i - 1] + diag_offset[i];
fine_to_coarse_offset[i] = fine_to_coarse_offset[i - 1] + fine_to_coarse_offset[i];
offd_offset[i] = offd_offset[i - 1] + offd_offset[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num > 0)
{
/* update row pointer array with offset,
* making sure to update the row stop index */
for (i = start + 1; i <= stop; i++)
{
P_diag_i[i] += diag_offset[my_thread_num - 1];
P_offd_i[i] += offd_offset[my_thread_num - 1];
}
/* update fine_to_coarse by offsetting with the offset
* from the preceding thread */
for (i = start; i < stop; i++)
{
if (fine_to_coarse[i] >= 0)
{ fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num - 1]; }
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
}
/* Fine to coarse mapping */
if (num_procs > 1 && my_thread_num == 0)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = start; i < stop; i++)
{
jj_begin_row = P_diag_i[i];
jj_begin_row_offd = P_offd_i[i];
jj_counter = jj_begin_row;
jj_counter_offd = jj_begin_row_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd] = loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
{
/* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn * A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute * A_diag_data[jj1];
if (i2 == i && (sgn * A_diag_data[jj1]) < 0)
{
diagonal += distribute * A_diag_data[jj1];
}
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute * A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row || loc_col == i)
{
sum += A_ext_data[jj1];
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute *
A_ext_data[jj1];
if (loc_col == i)
{
diagonal += distribute * A_ext_data[jj1];
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute *
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
}
/*-----------------------------------------------------------------------
* End large for loop over nfine
*-----------------------------------------------------------------------*/
if (n_fine)
{
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
}
/*-----------------------------------------------------------------------
* End PAR_REGION
*-----------------------------------------------------------------------*/
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] == -3)
{
CF_marker[i] = -1;
}
}
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(diag_offset, HYPRE_MEMORY_HOST);
hypre_TFree(offd_offset, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offset, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtPICCInterp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int **ext_p, **ext_p_offd;*/
/*HYPRE_Int ccounter_offd;
HYPRE_Int *clist_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/*HYPRE_Int ccounter;
HYPRE_Int *clist, ccounter;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
   /* Set up off processor information (specifically for neighbors of
    * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/*clist = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS);
for (i = 0; i < MAX_C_CONNECTIONS; i++)
clist[i] = 0;
if (num_procs > 1)
{
clist_offd = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS, HYPRE_MEMORY_HOST);
for (i = 0; i < MAX_C_CONNECTIONS; i++)
clist_offd[i] = 0;
}*/
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
{
P_offd_i[i] = jj_counter_offd;
}
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
/* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
/*clist[ccounter++] = i1;*/
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
/* search through offd to find all c neighbors */
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
/* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < P_offd_i[i])
{
/*clist_offd[ccounter_offd++] = i1;*/
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{
               /* i1 is an F point, loop through its strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if (hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if (num_procs > 1 && common_c == 0)
{
/* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
/* k1 is a c point check if it is common */
/*if (hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if (!common_c)
{
/* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
/*break;*/
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == -1)
{
/* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
/* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] == 2)
{
/*if (hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = (HYPRE_BigInt)(-big_k1 - 1);
if (CF_marker_offd[loc_col] == 2)
{
/*if (hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
/* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
/*break;*/
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
/* search through offd to find all c neighbors */
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == 2)
{
/* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if (num_procs > 1)
{
jj_begin_row_offd = jj_counter_offd;
}
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*clist[ccounter++] = i1;*/
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*clist_offd[ccounter_offd++] = i1;*/
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search through F points */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if (hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if (num_procs > 1 && common_c == 0)
{
/* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
/* k1 is a c point check if it is common */
/*if (hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if (!common_c)
{
/* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == -1)
{
/* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
/* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] == 2)
{
/*if (hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (CF_marker_offd[loc_col] == 2)
{
/*if (hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
else
{
loc_col = (-big_k1 - 1);
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd] = loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
{
/* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn * A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute * A_diag_data[jj1];
if (i2 == i && (sgn * A_diag_data[jj1]) < 0)
{
diagonal += distribute * A_diag_data[jj1];
}
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute * A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row || loc_col == i)
{
sum += A_ext_data[jj1];
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute *
A_ext_data[jj1];
if (loc_col == i)
{
diagonal += distribute * A_ext_data[jj1];
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute *
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i = 0; i < n_fine; i++)
if (CF_marker[i] == -3) { CF_marker[i] = -1; }
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
/*hypre_TFree(clist);*/
if (num_procs > 1)
{
/*hypre_TFree(clist_offd);*/
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFFInterp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix  *A,
                             HYPRE_Int           *CF_marker,
                             hypre_ParCSRMatrix  *S,
                             HYPRE_BigInt        *num_cpts_global,
                             HYPRE_Int            num_functions,
                             HYPRE_Int           *dof_func,
                             HYPRE_Int            debug_flag,
                             HYPRE_Real           trunc_factor,
                             HYPRE_Int            max_elmts,
                             hypre_ParCSRMatrix **P_ptr)
{
   /*
    * Build FF interpolation: an F-point i interpolates from its strong C
    * neighbors; a strong F neighbor i1 is only "extended through" (its own
    * C neighbors added to the interpolation set) when i and i1 share no
    * common C point.  Two passes: pass 1 sizes P and builds the
    * fine-to-coarse map, pass 2 fills the classical-interpolation weights
    * (with distribution of strong-F connections) and scales by -1/diagonal.
    *
    * A              - fine-grid operator (diag + offd CSR parts)
    * CF_marker      - C/F splitting; >0 C-point, -1 F-point, -3 special
    *                  F-point (skipped; restored to -1 on exit); the value
    *                  2 is used transiently inside this routine
    * S              - strength-of-connection matrix (same row space as A)
    * num_cpts_global- [first, last) global C-point range for this rank
    * num_functions/dof_func - system-AMG variable info (weak connections
    *                  across functions are ignored in the diagonal)
    * debug_flag     - unused here, kept for interface compatibility
    * trunc_factor/max_elmts - truncation applied to P after assembly
    * P_ptr          - output: the interpolation matrix
    *
    * Returns hypre_error_flag.
    */

   /* Communication Variables */
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int               my_id, num_procs;
   HYPRE_MemoryLocation    memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_BigInt     total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix    *P_diag;
   hypre_CSRMatrix    *P_offd;

   HYPRE_Real      *P_diag_data = NULL;
   HYPRE_Int       *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real      *P_offd_data = NULL;
   HYPRE_Int       *P_offd_i, *P_offd_j = NULL;

   HYPRE_Int        P_diag_size;
   HYPRE_Int        P_offd_size;

   HYPRE_Int       *P_marker = NULL;
   HYPRE_Int       *P_marker_offd = NULL;
   HYPRE_Int       *CF_marker_offd = NULL;
   HYPRE_Int       *tmp_CF_marker_offd = NULL;
   HYPRE_Int       *dof_func_offd = NULL;
   HYPRE_Int        common_c;

   /* Full row information for columns of A that are off diag */
   hypre_CSRMatrix *A_ext;
   HYPRE_Real      *A_ext_data;
   HYPRE_Int       *A_ext_i;
   HYPRE_BigInt    *A_ext_j;

   HYPRE_Int       *fine_to_coarse = NULL;
   HYPRE_BigInt    *fine_to_coarse_offd = NULL;

   HYPRE_Int        loc_col;
   HYPRE_Int        full_off_procNodes;

   hypre_CSRMatrix *Sop;
   HYPRE_Int       *Sop_i;
   HYPRE_BigInt    *Sop_j;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int        jj_counter, jj_counter_offd;
   HYPRE_Int        jj_begin_row, jj_end_row;
   HYPRE_Int        jj_begin_row_offd = 0;
   HYPRE_Int        jj_end_row_offd = 0;
   HYPRE_Int        coarse_counter;

   /* Interpolation weight variables */
   HYPRE_Real       sum, diagonal, distribute;
   HYPRE_Int        strong_f_marker = -2;
   HYPRE_Int        sgn = 1;

   /* Loop variables */
   HYPRE_Int        start_indexing = 0;
   HYPRE_Int        i, i1, i2, jj, kk, k1, jj1;
   HYPRE_BigInt     big_k1;

   /* Definitions */
   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one = 1.0;

   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   my_first_cpt = num_cpts_global[0];
   /* Only the last rank knows the global C-point count; broadcast it. */
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off-processor information (specifically for neighbors of
    * neighbors) */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 1);
      {
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }

      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);

      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixBigJ(Sop);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   }

   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
   }

   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
      {
         P_offd_i[i] = jj_counter_offd;
      }

      if (CF_marker[i] >= 0)
      {
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that strongly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else
      {
         /* Mark direct C neighbors with the transient value 2 so that the
          * common-C test below can recognize them in O(1). */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] > 0)
            {
               /* i1 is a C point */
               CF_marker[i1] = 2;
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               /* search through offd to find all c neighbors */
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] > 0)
               {
                  /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 2;
                  if (P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* Search diag to find f neighbors and determine if common c point */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] < 0)
            {
               /* i1 is an F point, loop through its strong neighbors */
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if (num_procs > 1 && common_c == 0)
               {
                  /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        common_c = 1;
                        break;
                     }
                  }
               }
               if (!common_c)
               {
                  /* No common c point, extend the interp set */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] > 0)
                     {
                        if (P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] > 0)
                        {
                           if (P_marker_offd[k1] < P_offd_i[i])
                           {
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] < 0)
               {
                  /* F point; look at neighbors of i1. Sop contains global col
                   * numbers and entries that could be in S_diag or S_offd or
                   * neither. */
                  common_c = 0;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     /* Check if common c */
                     big_k1 = Sop_j[kk];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* In S_diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        /* negative entries encode (already-localized) offd
                         * columns as -(index+1) */
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if (!common_c)
                  {
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     {
                        /* Check if common c */
                        big_k1 = Sop_j[kk];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* In S_diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] < P_diag_i[i])
                           {
                              P_marker[loc_col] = jj_counter;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if (P_marker_offd[loc_col] < P_offd_i[i])
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              tmp_CF_marker_offd[loc_col] = 1;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Restore the transient marker 2 -> 1 on direct C neighbors. */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == 2)
               {
                  /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 1;
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;

   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }

   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }

   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /* Fine to coarse mapping */
   if (num_procs > 1)
   {
      hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                 full_off_procNodes, my_first_cpt,
                                 fine_to_coarse_offd);
   }

   for (i = 0; i < n_fine; i++)
   {
      P_marker[i] = -1;
   }
   for (i = 0; i < full_off_procNodes; i++)
   {
      P_marker_offd[i] = -1;
   }

   /*-----------------------------------------------------------------------
    * Loop over fine grid points: second pass fills column indices/weights.
    *-----------------------------------------------------------------------*/
   jj_begin_row_offd = 0;
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      if (num_procs > 1)
      {
         jj_begin_row_offd = jj_counter_offd;
      }

      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         strong_f_marker--;
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* Search C points only */
            i1 = S_diag_j[jj];
            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] > 0)
            {
               CF_marker[i1] = 2;
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] > 0)
               {
                  CF_marker_offd[i1] = 2;
                  if (P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* Search through F points */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == -1)
            {
               P_marker[i1] = strong_f_marker;
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if (num_procs > 1 && common_c == 0)
               {
                  /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        common_c = 1;
                        break;
                     }
                  }
               }
               if (!common_c)
               {
                  /* No common c point, extend the interp set.
                   * NOTE(review): this pass extends on CF_marker[k1] >= 0
                   * while the sizing pass used CF_marker[k1] > 0 — verify the
                   * marker value 0 cannot occur here, else sizes could
                   * disagree. */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if (P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if (P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == -1)
               {
                  /* F points that are off proc */
                  P_marker_offd[i1] = strong_f_marker;
                  common_c = 0;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     /* Check if common c */
                     big_k1 = Sop_j[kk];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* In S_diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if (!common_c)
                  {
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     {
                        big_k1 = Sop_j[kk];
                        /* Find local col number */
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                              P_diag_data[jj_counter] = zero;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if (P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = loc_col;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Restore the transient marker 2 -> 1 on direct C neighbors. */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == 2)
               {
                  CF_marker_offd[i1] = 1;
               }
            }
         }

         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;

         /* The first entry of each row of A_diag is the diagonal a_ii. */
         diagonal = A_diag_data[A_diag_i[i]];

         for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
         {
            /* i1 is a c-point and strongly influences i, accumulate
             * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if (P_marker[i1] == strong_f_marker)
            {
               sum = zero;
               /* BUG FIX: reset sgn for every strong-F row.  Previously sgn
                * was only ever set to -1 and never restored, so once any row
                * had a negative diagonal, all later rows were filtered with
                * the wrong sign (cf. the sibling interpolation routine,
                * which resets sgn = 1 here). */
               sgn = 1;
               if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly influence i. */
               for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                  {
                     sum += A_diag_data[jj1];
                  }
               }
               if (num_procs > 1)
               {
                  for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if (P_marker_offd[i2] >= jj_begin_row_offd &&
                         (sgn * A_offd_data[jj1]) < 0)
                     {
                        sum += A_offd_data[jj1];
                     }
                  }
               }
               if (sum != 0)
               {
                  distribute = A_diag_data[jj] / sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                     {
                        P_diag_data[P_marker[i2]] +=
                           distribute * A_diag_data[jj1];
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd &&
                            (sgn * A_offd_data[jj1]) < 0)
                        {
                           P_offd_data[P_marker_offd[i2]] +=
                              distribute * A_offd_data[jj1];
                        }
                     }
                  }
               }
               else
               {
                  /* Nothing to distribute to: lump into the diagonal. */
                  diagonal += A_diag_data[jj];
               }
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if (num_functions == 1 || dof_func[i] == dof_func[i1])
               {
                  diagonal += A_diag_data[jj];
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
            {
               i1 = A_offd_j[jj];
               if (P_marker_offd[i1] >= jj_begin_row_offd)
               {
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               }
               else if (P_marker_offd[i1] == strong_f_marker)
               {
                  sum = zero;
                  for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                  {
                     big_k1 = A_ext_j[jj1];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] >= jj_begin_row)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                     else
                     {
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                     {
                        big_k1 = A_ext_j[jj1];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] >= jj_begin_row)
                           {
                              P_diag_data[P_marker[loc_col]] += distribute *
                                                                A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                           {
                              P_offd_data[P_marker_offd[loc_col]] += distribute *
                                                                     A_ext_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
            }
         }
         /* Scale the row by -1/diagonal (classical interpolation). */
         if (diagonal)
         {
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               P_diag_data[jj] /= -diagonal;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               P_offd_data[jj] /= -diagonal;
            }
         }
      }
      strong_f_marker--;
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate; refresh all local views. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map; col_map should be monotone increasing and contain
    * global numbers. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* Restore the special F-point marker for callers. */
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3) { CF_marker[i] = -1; }
   }

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFF1Interp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int ccounter_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
HYPRE_Int sgn = 1;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/*HYPRE_Int ccounter;*/
HYPRE_Int found_c = 0;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
{
P_offd_i[i] = jj_counter_offd;
}
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
/* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
/* search through offd to find all c neighbors */
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
/* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] < 0)
{
/* i1 is a F point, loop through it's strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if (num_procs > 1 && common_c == 0)
{
/* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
/* k1 is a c point check if it is common */
common_c = 1;
break;
}
}
}
if (!common_c)
{
/* No common c point, extend the interp set */
found_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
found_c = 1;
break;
}
}
}
if (num_procs > 1 && !found_c)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] < 0)
{
/* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
/* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
/* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
/* search through offd to find all c neighbors */
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == 2)
{
/* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
jj_begin_row_offd = 0;
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if (num_procs > 1)
{
jj_begin_row_offd = jj_counter_offd;
}
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search through F points */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if (num_procs > 1 && common_c == 0)
{
/* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
/* k1 is a c point check if it is common */
common_c = 1;
break;
}
}
}
if (!common_c)
{
/* No common c point, extend the interp set */
found_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
found_c = 1;
break;
}
}
}
if (num_procs > 1 && !found_c)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == -1)
{
/* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
/* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd] = loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
{
/* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly incluence i. */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute * A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute * A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
{
sum += A_ext_data[jj1];
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute *
A_ext_data[jj1];
}
else
{
loc_col = - (HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute *
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i = 0; i < n_fine; i++)
if (CF_marker[i] == -3) { CF_marker[i] = -1; }
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
/*hynre_TFree(clist);*/
if (num_procs > 1)
{
/*hypre_TFree(clist_offd);*/
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtInterp
 * Comment: Extended ("ext") classical interpolation.  The Host routine
 *          below builds the interpolation matrix P on the CPU; the
 *          hypre_BoomerAMGBuildExtInterp wrapper further down dispatches
 *          between this host version and the device implementation.
 *--------------------------------------------------------------------------*/
/*
 * Host (CPU) implementation of extended ("ext") interpolation.
 *
 * Builds the interpolation matrix P in two passes over the fine grid:
 *   Pass 1 counts, for every fine row, the interpolatory set (strongly
 *          connected C points, plus C points reached through strong F
 *          neighbors) to size P_diag / P_offd, and fills fine_to_coarse.
 *   Pass 2 fills the column indices and accumulates the weights with the
 *          classical distribution formula, then scales each row by
 *          -1/diagonal.
 *
 * Parameters:
 *   A                - fine-grid operator (ParCSR)
 *   CF_marker        - C/F splitting: >= 0 marks a C point, -1 an F point,
 *                      -3 an F point excluded from interpolation
 *                      (entries equal to -3 are reset to -1 on exit)
 *   S                - strength-of-connection matrix matching A's graph
 *   num_cpts_global  - [0] = first coarse index owned by this rank; on the
 *                      last rank, [1] supplies the global number of C points
 *   num_functions    - number of variables per grid point (systems AMG)
 *   dof_func         - variable/function index of each fine point
 *   debug_flag       - when 4, print per-phase wall-clock timings
 *   trunc_factor     - drop tolerance for P truncation (0.0 disables)
 *   max_elmts        - max nonzeros kept per row of P (0 disables)
 *   P_ptr            - output: the assembled interpolation matrix
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_BoomerAMGBuildExtInterpHost(hypre_ParCSRMatrix *A,
                                  HYPRE_Int *CF_marker,
                                  hypre_ParCSRMatrix *S,
                                  HYPRE_BigInt *num_cpts_global,
                                  HYPRE_Int num_functions,
                                  HYPRE_Int *dof_func,
                                  HYPRE_Int debug_flag,
                                  HYPRE_Real trunc_factor,
                                  HYPRE_Int max_elmts,
                                  hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
     HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is the global column range owned by this rank; used to
    * tell whether a global column index from Sop/A_ext is local. */
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_BigInt total_global_cpts, my_first_cpt;
   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;
   /*HYPRE_Int *col_map_offd_P = NULL;*/
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   /* P_marker / P_marker_offd map a column index to its slot in the current
    * row of P; values below the current row's "stamp" are stale. */
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *P_marker_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *tmp_CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;
   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data;
   HYPRE_Int *A_ext_i;
   HYPRE_BigInt *A_ext_j;
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_BigInt *fine_to_coarse_offd = NULL;
   HYPRE_Int loc_col;
   HYPRE_Int full_off_procNodes;    /* # off-proc nodes incl. neighbors of neighbors */
   hypre_CSRMatrix *Sop;
   HYPRE_Int *Sop_i;
   HYPRE_BigInt *Sop_j;
   HYPRE_Int sgn = 1;
   /* Variables to keep count of interpolatory points */
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int jj_begin_row, jj_end_row;
   HYPRE_Int jj_begin_row_offd = 0;
   HYPRE_Int jj_end_row_offd = 0;
   HYPRE_Int coarse_counter;
   /* Interpolation weight variables */
   HYPRE_Real sum, diagonal, distribute;
   /* Tag written into P_marker for strong F neighbors of the current row;
    * decremented every row so the marker arrays never need resetting. */
   HYPRE_Int strong_f_marker = -2;
   /* Loop variables */
   /*HYPRE_Int index;*/
   HYPRE_Int start_indexing = 0;
   HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
   HYPRE_BigInt big_k1;
   /* Definitions */
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Real wall_time;
   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   my_first_cpt = num_cpts_global[0];
   /* Only the last rank knows the global C-point count; broadcast it. */
   if (my_id == (num_procs - 1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 1);
      {
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixBigJ(Sop);
   }
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   }
   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
   }
   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
      {
         P_offd_i[i] = jj_counter_offd;
      }
      if (CF_marker[i] >= 0)
      {
         /* C point: interpolates only from itself (one diag entry). */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that stronly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] >= 0)
            {
               /* i1 is a C point */
               /* A marker below the row start P_diag_i[i] is stale from an
                * earlier row, so this column is not yet counted for row i. */
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
            else if (CF_marker[i1] != -3)
            {
               /* i1 is a F point, loop through it's strong neighbors */
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if (P_marker[k1] < P_diag_i[i])
                     {
                        P_marker[k1] = jj_counter;
                        jj_counter++;
                     }
                  }
               }
               if (num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] >= 0)
                     {
                        if (P_marker_offd[k1] < P_offd_i[i])
                        {
                           /* remember this off-proc column appears in P */
                           tmp_CF_marker_offd[k1] = 1;
                           P_marker_offd[k1] = jj_counter_offd;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] >= 0)
               {
                  if (P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  /* F point; look at neighbors of i1. Sop contains global col
                   * numbers and entries that could be in S_diag or S_offd or
                   * neither. */
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     big_k1 = Sop_j[kk];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* In S_diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] < P_diag_i[i])
                        {
                           P_marker[loc_col] = jj_counter;
                           jj_counter++;
                        }
                     }
                     else
                     {
                        /* Non-local column: presumably encoded by
                         * hypre_exchange_interp_data as -(offd index)-1;
                         * this decode inverts that (verify against the
                         * exchange routine). */
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] < P_offd_i[i])
                        {
                           P_marker_offd[loc_col] = jj_counter_offd;
                           tmp_CF_marker_offd[loc_col] = 1;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
      }
   }
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d determine structure %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds();
   }
   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;
   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }
   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }
   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /* Fine to coarse mapping */
   if (num_procs > 1)
   {
      hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                 full_off_procNodes, my_first_cpt,
                                 fine_to_coarse_offd);
   }
   /* Reset markers for the second pass. */
   for (i = 0; i < n_fine; i++)
   {
      P_marker[i] = -1;
   }
   for (i = 0; i < full_off_procNodes; i++)
   {
      P_marker_offd[i] = -1;
   }
   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      jj_begin_row_offd = jj_counter_offd;
      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         /* Fresh tag for this row's strong F neighbors. */
         strong_f_marker--;
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] >= 0)
            {
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            else if (CF_marker[i1] != -3)
            {
               /* Strong F neighbor: tag it, then add its strong C points
                * to the interpolatory set (extended interpolation). */
               P_marker[i1] = strong_f_marker;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if (P_marker[k1] < jj_begin_row)
                     {
                        P_marker[k1] = jj_counter;
                        P_diag_j[jj_counter] = fine_to_coarse[k1];
                        P_diag_data[jj_counter] = zero;
                        jj_counter++;
                     }
                  }
               }
               if (num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] >= 0)
                     {
                        if (P_marker_offd[k1] < jj_begin_row_offd)
                        {
                           P_marker_offd[k1] = jj_counter_offd;
                           P_offd_j[jj_counter_offd] = k1;
                           P_offd_data[jj_counter_offd] = zero;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
         if ( num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if ( CF_marker_offd[i1] >= 0)
               {
                  if (P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  P_marker_offd[i1] = strong_f_marker;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     big_k1 = Sop_j[kk];
                     /* Find local col number */
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] < jj_begin_row)
                        {
                           P_marker[loc_col] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                     else
                     {
                        /* See pass-1 note on the -(index)-1 encoding. */
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] < jj_begin_row_offd)
                        {
                           P_marker_offd[loc_col] = jj_counter_offd;
                           P_offd_j[jj_counter_offd] = loc_col;
                           P_offd_data[jj_counter_offd] = zero;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;
         /* The diagonal a_ii is stored first in each row of A_diag; the
          * weight loops below start at A_diag_i[i] + 1 to skip it. */
         diagonal = A_diag_data[A_diag_i[i]];
         for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
         {
            /* i1 is a c-point and strongly influences i, accumulate
             * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if (P_marker[i1] == strong_f_marker)
            {
               sum = zero;
               /* sgn records the sign of a_(i1,i1); only connections with
                * sign opposite to that diagonal take part in the
                * distribution below. */
               sgn = 1;
               if (A_diag_data[A_diag_i[i1]] < 0)
               {
                  sgn = -1;
               }
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly incluence i. */
               for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if ((P_marker[i2] >= jj_begin_row ) && (sgn * A_diag_data[jj1]) < 0)
                  {
                     sum += A_diag_data[jj1];
                  }
               }
               if (num_procs > 1)
               {
                  for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn * A_offd_data[jj1]) < 0)
                     {
                        sum += A_offd_data[jj1];
                     }
                  }
               }
               if (sum != 0)
               {
                  /* Distribute a_(i,i1) over i1's connections to the
                   * interpolatory set, proportionally to their size. */
                  distribute = A_diag_data[jj] / sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                     {
                        P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1];
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn * A_offd_data[jj1]) < 0)
                        {
                           P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1];
                        }
                     }
                  }
               }
               else
               {
                  /* Nothing to distribute to: lump a_(i,i1) into the
                   * diagonal instead. */
                  diagonal += A_diag_data[jj];
               }
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if (num_functions == 1 || dof_func[i] == dof_func[i1])
               {
                  diagonal += A_diag_data[jj];
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
            {
               i1 = A_offd_j[jj];
               if (P_marker_offd[i1] >= jj_begin_row_offd)
               {
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               }
               else if (P_marker_offd[i1] == strong_f_marker)
               {
                  /* Off-proc strong F neighbor: its full row comes from
                   * A_ext, which stores global column numbers. */
                  sum = zero;
                  for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                  {
                     big_k1 = A_ext_j[jj1];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] >= jj_begin_row )
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                     else
                     {
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                     {
                        big_k1 = A_ext_j[jj1];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] >= jj_begin_row)
                           {
                              P_diag_data[P_marker[loc_col]] += distribute * A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                           {
                              P_offd_data[P_marker_offd[loc_col]] += distribute * A_ext_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
            }
         }
         /* Final weights: w_ij = -(accumulated weight) / a_ii. */
         if (diagonal)
         {
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               P_diag_data[jj] /= -diagonal;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               P_offd_data[jj] /= -diagonal;
            }
         }
      }
      strong_f_marker--;
   }
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate; refresh the local views. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }
   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }
   hypre_MatvecCommPkgCreate(P);
   /* Restore the special F-point marking for the caller. */
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3)
      {
         CF_marker[i] = -1;
      }
   }
   *P_ptr = P;
   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }
   return hypre_error_flag;
}
/*
 * Dispatch wrapper for extended ("ext") interpolation: on CUDA/HIP builds
 * it runs the device kernel when A resides in device memory, and falls
 * back to the host implementation otherwise.  Returns the callee's error
 * code.
 */
HYPRE_Int
hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                              HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
                              HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
                              hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int err_code = 0;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ExtInterp");
   if (hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE)
   {
      err_code = hypre_BoomerAMGBuildExtInterpDevice(A, CF_marker, S, num_cpts_global,
                                                     num_functions, dof_func, debug_flag,
                                                     trunc_factor, max_elmts, P_ptr);
   }
   else
#endif
   {
      err_code = hypre_BoomerAMGBuildExtInterpHost(A, CF_marker, S, num_cpts_global,
                                                   num_functions, dof_func, debug_flag,
                                                   trunc_factor, max_elmts, P_ptr);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif
   return err_code;
}
/*-----------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix *A,
                                HYPRE_Int *CF_marker,
                                hypre_ParCSRMatrix *S,
                                HYPRE_BigInt *num_cpts_global,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                HYPRE_Int debug_flag,
                                HYPRE_Real trunc_factor,
                                HYPRE_Int max_elmts,
                                hypre_ParCSRMatrix **P_ptr)
{
   /* Build the extended+i ("Ext+i") interpolation operator P for BoomerAMG.
      Thin dispatcher between the device and host implementations: the device
      path is taken only when GPU support is compiled in and A's memory
      location maps to device execution.  P is returned through P_ptr. */
   HYPRE_Int status = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ExtPIInterp");

   if (hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE)
   {
      status = hypre_BoomerAMGBuildExtPIInterpDevice(A, CF_marker, S, num_cpts_global,
                                                     num_functions, dof_func, debug_flag,
                                                     trunc_factor, max_elmts, P_ptr);
   }
   else
   {
      status = hypre_BoomerAMGBuildExtPIInterpHost(A, CF_marker, S, num_cpts_global,
                                                   num_functions, dof_func, debug_flag,
                                                   trunc_factor, max_elmts, P_ptr);
   }

   hypre_GpuProfilingPopRange();
#else
   /* CPU-only build: host path unconditionally. */
   status = hypre_BoomerAMGBuildExtPIInterpHost(A, CF_marker, S, num_cpts_global,
                                                num_functions, dof_func, debug_flag,
                                                trunc_factor, max_elmts, P_ptr);
#endif

   return status;
}
|
tally.h | #ifndef OPENMC_TALLIES_TALLY_H
#define OPENMC_TALLIES_TALLY_H
#include "openmc/constants.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/trigger.h"
#include <gsl/gsl>
#include "pugixml.hpp"
#include "xtensor/xfixed.hpp"
#include "xtensor/xtensor.hpp"
#include <memory> // for unique_ptr
#include <unordered_map>
#include <string>
#include <vector>
namespace openmc {
//==============================================================================
//! A user-specified flux-weighted (or current) measurement.
//==============================================================================
class Tally {
public:
//----------------------------------------------------------------------------
// Constructors, destructors, factory functions
explicit Tally(int32_t id);
explicit Tally(pugi::xml_node node);
~Tally();
//! Factory function; an id of -1 requests an automatically assigned id
//! (presumably the next free one — confirm in tally.cpp).
static Tally* create(int32_t id = -1);
//----------------------------------------------------------------------------
// Accessors
void set_id(int32_t id);
void set_active(bool active) { active_ = active; }
void set_writable(bool writable) { writable_ = writable; }
void set_scores(pugi::xml_node node);
void set_scores(const std::vector<std::string>& scores);
void set_nuclides(pugi::xml_node node);
void set_nuclides(const std::vector<std::string>& nuclides);
//! Indices into the global filters array for this tally's filters.
const std::vector<int32_t>& filters() const {return filters_;}
//! Index of the i-th filter (no bounds checking).
int32_t filters(int i) const {return filters_[i];}
void set_filters(gsl::span<Filter*> filters);
//! 1-D indexing stride assigned to the i-th filter (no bounds checking).
int32_t strides(int i) const {return strides_[i];}
int32_t n_filter_bins() const {return n_filter_bins_;}
bool writable() const { return writable_;}
//----------------------------------------------------------------------------
// Other methods.
// NOTE(review): despite the name, this forwards a single-element span to
// set_filters — confirm that this replaces the filter list rather than
// appending to it.
void add_filter(Filter* filter) { set_filters({&filter, 1}); }
void init_triggers(pugi::xml_node node);
void init_results();
void reset();
void accumulate();
//----------------------------------------------------------------------------
// Major public data members.
int id_ {C_NONE}; //!< User-defined identifier
std::string name_; //!< User-defined name
int type_ {TALLY_VOLUME}; //!< e.g. volume, surface current
//! Event type that contributes to this tally
int estimator_ {ESTIMATOR_TRACKLENGTH};
//! Whether this tally is currently being updated
bool active_ {false};
//! Number of realizations
int n_realizations_ {0};
std::vector<int> scores_; //!< Filter integrands (e.g. flux, fission)
//! Index of each nuclide to be tallied. -1 indicates total material.
std::vector<int> nuclides_ {-1};
//! True if this tally has a bin for every nuclide in the problem
bool all_nuclides_ {false};
//! Results for each bin -- the first dimension of the array is for scores
//! (e.g. flux, total reaction rate, fission reaction rate, etc.) and the
//! second dimension of the array is for the combination of filters
//! (e.g. specific cell, specific energy group, etc.)
xt::xtensor<double, 3> results_;
//! True if this tally should be written to statepoint files
bool writable_ {true};
//----------------------------------------------------------------------------
// Miscellaneous public members.
// We need to have quick access to some filters. The following gives indices
// for various filters that could be in the tally or C_NONE if they are not
// present.
int energyout_filter_ {C_NONE};
int delayedgroup_filter_ {C_NONE};
bool depletion_rx_ {false}; //!< Has depletion reactions (e.g. (n,2n))
std::vector<Trigger> triggers_;
int deriv_ {C_NONE}; //!< Index of a TallyDerivative object for diff tallies.
private:
//----------------------------------------------------------------------------
// Private data.
std::vector<int32_t> filters_; //!< Filter indices in global filters array
//! Index strides assigned to each filter to support 1D indexing.
std::vector<int32_t> strides_;
// Total number of filter bins (product of each filter's bin count).
int32_t n_filter_bins_ {0};
// Position of this tally — presumably its index in model::tallies; confirm.
gsl::index index_;
};
//==============================================================================
// Global variable declarations
//==============================================================================
namespace model {
//! All tallies defined for the problem (owning storage).
extern std::vector<std::unique_ptr<Tally>> tallies;
// The following vectors hold indices into model::tallies for the tallies
// that are currently active, partitioned by estimator/score type.
extern std::vector<int> active_tallies;
extern std::vector<int> active_analog_tallies;
extern std::vector<int> active_tracklength_tallies;
extern std::vector<int> active_collision_tallies;
extern std::vector<int> active_meshsurf_tallies;
extern std::vector<int> active_surface_tallies;
// Maps a user-defined tally id to its index in model::tallies — presumably;
// confirm against tally.cpp.
extern std::unordered_map<int, int> tally_map;
}
namespace simulation {
//! Global tallies (such as k-effective estimators)
extern xt::xtensor_fixed<double, xt::xshape<N_GLOBAL_TALLIES, 3>> global_tallies;
//! Number of realizations for global tallies
// (C linkage: this symbol is shared with C/Fortran callers.)
extern "C" int32_t n_realizations;
}
// It is possible to protect accumulate operations on global tallies by using an
// atomic update. However, when multiple threads accumulate to the same global
// tally, it can cause a higher cache miss rate due to invalidation. Thus, we
// use threadprivate variables to accumulate global tallies and then reduce at
// the end of a generation.
extern double global_tally_absorption;
extern double global_tally_collision;
extern double global_tally_tracklength;
extern double global_tally_leakage;
#pragma omp threadprivate(global_tally_absorption, global_tally_collision, \
global_tally_tracklength, global_tally_leakage)
//==============================================================================
// Non-member functions
//==============================================================================
//! Read tally specification from tallies.xml
void read_tallies_xml();
//! \brief Accumulate the sum of the contributions from each history within the
//! batch to a new random variable
void accumulate_tallies();
//! Determine which tallies should be active
void setup_active_tallies();
// Alias for the type returned by xt::adapt(...). N is the dimension of the
// multidimensional array.  The adaptor wraps an externally owned double
// buffer (no_ownership) so results can be viewed as an xtensor without copy.
template <std::size_t N>
using adaptor_type = xt::xtensor_adaptor<xt::xbuffer_adaptor<double*&, xt::no_ownership>, N>;
#ifdef OPENMC_MPI
//! Collect all tally results onto master process
void reduce_tally_results();
#endif
//! Free tally-related global state (presumably the globals declared above).
void free_memory_tally();
} // namespace openmc
#endif // OPENMC_TALLIES_TALLY_H
|
omp_doacross.c | // RUN: %libomp-compile-and-run
// XFAIL: gcc-4, gcc-5, clang-3.7, clang-3.8, icc-15, icc-16
#include <stdio.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#ifndef N
#define N 750
#endif

/*
 * Fill an N x N matrix by a wavefront recurrence in which element (row, col)
 * depends on its north, west, and north-west neighbors, synchronized with
 * OpenMP 4.5 doacross (ordered depend) clauses.  With the first row and
 * column initialized to 0, 1, 2, ..., every element equals row + col, so the
 * bottom-right element must be 2*(N-1) if and only if the cross-iteration
 * dependences were honored.
 *
 * Returns 1 on success, 0 on failure (including allocation failure).
 */
int test_doacross() {
  int i, j;
  // Allocate and zero out the matrix
  int *m = (int *)malloc(sizeof(int) * N * N);
  if (m == NULL) {
    // Fix: the original dereferenced the buffer without checking malloc;
    // report the run as failed instead of crashing on allocation failure.
    fprintf(stderr, "test_doacross: out of memory\n");
    return 0;
  }
  for (i = 0; i < N; ++i) {
    for (j = 0; j < N; ++j) {
      m[i * N + j] = 0;
    }
  }
  // Have first row and column be 0, 1, 2, 3, etc.
  for (i = 0; i < N; ++i)
    m[i * N] = i;
  for (j = 0; j < N; ++j)
    m[j] = j;
  // Perform wavefront which results in matrix:
  // 0 1 2 3 4
  // 1 2 3 4 5
  // 2 3 4 5 6
  // 3 4 5 6 7
  // 4 5 6 7 8
#pragma omp parallel shared(m)
  {
    int row, col;
#pragma omp for ordered(2)
    for (row = 1; row < N; ++row) {
      for (col = 1; col < N; ++col) {
// Wait until the north and west neighbors have been computed.
#pragma omp ordered depend(sink : row - 1, col) depend(sink : row, col - 1)
        m[row * N + col] = m[(row - 1) * N + col] + m[row * N + (col - 1)] -
                           m[(row - 1) * N + (col - 1)];
// Publish this iteration's element for dependent iterations.
#pragma omp ordered depend(source)
      }
    }
  }
  // Check the bottom right element to see if iteration dependencies were held
  int retval = (m[(N - 1) * N + N - 1] == 2 * (N - 1));
  free(m);
  return retval;
}
/*
 * Driver: ensure more than one thread is available (the dependence check is
 * only meaningful with concurrency), run the doacross test REPETITIONS
 * times, and return the number of failing runs (0 means all passed).
 */
int main(int argc, char **argv) {
  int rep;
  int failures = 0;
  if (omp_get_max_threads() < 2)
    omp_set_num_threads(4);
  for (rep = 0; rep < REPETITIONS; ++rep) {
    if (!test_doacross())
      ++failures;
  }
  return failures;
}
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
/* DDS surface-description (DDSD_*) flags: which DDSInfo header fields are
   valid in the file being read. */
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
/* Pixel-format (DDPF_*) flags describing how pixel data is encoded. */
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
/* FourCC codes for the compressed formats: the ASCII strings "DXT1",
   "DXT3", "DXT5" stored little-endian. */
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
/* Surface-capability flags (caps1 and caps2 words of the header). */
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
/* Fallback for toolchains whose headers do not provide SIZE_MAX. */
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
/* In-memory copy of the DDS_PIXELFORMAT portion of the file header; all
   32-bit file fields are widened to size_t on read. */
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
/* In-memory copy of the DDS_HEADER fields this coder needs. */
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
/* Four-entry RGBA color table (as used per block when decoding the DXTn
   formats — presumably populated from the two block endpoints; confirm in
   the ReadDXT* routines). */
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
/* Simple 4-component float vector. */
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
/* Simple 3-component float vector. */
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
/* One entry of the single-colour compression lookup tables below:
   endpoint start/end values plus the resulting quantization error. */
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
/* Pair of candidate source blocks for one input intensity. */
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
/* Signature of a format-specific decoder (DXT1/DXT3/DXT5/uncompressed). */
typedef MagickBooleanType
DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *);
/* Signature of a per-pixel decoding helper. */
typedef MagickBooleanType
DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *);
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Per-channel single-colour lookup tables: index 0 is the 5-bit red
  channel, index 1 the 6-bit green channel and index 2 the blue channel,
  which reuses the 5-bit table.
*/
static const DDSSingleColourLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};
/*
  Macros
*/
/* Extract the raw 5-, 6- and 5-bit channels from a packed R5G6B5 value. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
/* Expand a 5- or 6-bit channel to 8 bits by replicating its top bits. */
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
/* Halve a mipmap dimension, never going below 1. */
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
/* Widen [min,max] so the range spans at least 'steps' values in 0..255. */
#define FixRange(min, max, steps) \
if (min > max) \
  min = max; \
if ((ssize_t) max - min < steps) \
  max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
  min = MagickMax(0, (ssize_t) max - steps)
/* Dot product of the x/y/z components (any w component is ignored). */
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)
/* Broadcast a scalar into all four (or three) vector components. */
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
/* True when a DDS pixel format's channel bit masks match exactly. */
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
  Forward declarations
*/
/* Header parsing and block decoders, one per supported DDS pixel layout. */
static MagickBooleanType
  ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
    DDSVector4 *, DDSVector4 *, unsigned char *, size_t),
  ReadDDSInfo(Image *,DDSInfo *),
  ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *,
    const MagickBooleanType,ExceptionInfo *),
  ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *,
    const MagickBooleanType,ExceptionInfo *),
  SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *),
  WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t,
    const MagickBooleanType,const MagickBooleanType,const MagickBooleanType,
    ExceptionInfo *);

/* Encoder helpers used by the DDS writer. */
static void
  RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
  WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
  WriteFourCC(Image *,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);
/*
  Component-wise sum: *destination = left + right.
*/
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->w=left.w+right.w;
  destination->z=left.z+right.z;
  destination->y=left.y+right.y;
  destination->x=left.x+right.x;
}
/*
  Clamp every component of *value into the unit interval [0,1].
*/
static inline void VectorClamp(DDSVector4 *value)
{
  value->w=MagickMin(1.0f,MagickMax(0.0f,value->w));
  value->z=MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->y=MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->x=MagickMin(1.0f,MagickMax(0.0f,value->x));
}
/*
  Clamp the three components of *value into the unit interval [0,1].
*/
static inline void VectorClamp3(DDSVector3 *value)
{
  value->z=MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->y=MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->x=MagickMin(1.0f,MagickMax(0.0f,value->x));
}
/*
  Copy the x/y/z components of a 4-vector into a 3-vector (w is dropped).
*/
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->z=source.z;
  destination->y=source.y;
  destination->x=source.x;
}
/*
  Copy all four components of a 4-vector into another 4-vector.
*/
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->w=source.w;
  destination->z=source.z;
  destination->y=source.y;
  destination->x=source.x;
}
/*
  Fused negative multiply-subtract: *destination = c - (a * b), applied
  component-wise.
*/
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->w=c.w-(a.w*b.w);
  destination->z=c.z-(a.z*b.z);
  destination->y=c.y-(a.y*b.y);
  destination->x=c.x-(a.x*b.x);
}
/*
  Component-wise (Hadamard) product: *destination = left * right.
*/
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->w=left.w*right.w;
  destination->z=left.z*right.z;
  destination->y=left.y*right.y;
  destination->x=left.x*right.x;
}
/*
  Component-wise (Hadamard) product of two 3-vectors.
*/
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->z=left.z*right.z;
  destination->y=left.y*right.y;
  destination->x=left.x*right.x;
}
/*
  Fused multiply-add: *destination = (a * b) + c, applied component-wise.
*/
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->w=(a.w*b.w)+c.w;
  destination->z=(a.z*b.z)+c.z;
  destination->y=(a.y*b.y)+c.y;
  destination->x=(a.x*b.x)+c.x;
}
/*
  Fused multiply-add for 3-vectors: *destination = (a * b) + c.
*/
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->z=(a.z*b.z)+c.z;
  destination->y=(a.y*b.y)+c.y;
  destination->x=(a.x*b.x)+c.x;
}
/*
  Component-wise reciprocal: *destination = 1 / value.  No guard against
  zero components; callers must ensure the value is non-zero.
*/
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->w=1.0f/value.w;
  destination->z=1.0f/value.z;
  destination->y=1.0f/value.y;
  destination->x=1.0f/value.x;
}
/*
  Component-wise difference: *destination = left - right.
*/
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->w=left.w-right.w;
  destination->z=left.z-right.z;
  destination->y=left.y-right.y;
  destination->x=left.x-right.x;
}
/*
  Component-wise difference of two 3-vectors.
*/
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->z=left.z-right.z;
  destination->y=left.y-right.y;
  destination->x=left.x-right.x;
}
/*
  Round each component toward zero (floor for positives, ceil otherwise).
*/
static inline void VectorTruncate(DDSVector4 *value)
{
  value->w=(value->w <= 0.0f) ? ceil(value->w) : floor(value->w);
  value->z=(value->z <= 0.0f) ? ceil(value->z) : floor(value->z);
  value->y=(value->y <= 0.0f) ? ceil(value->y) : floor(value->y);
  value->x=(value->x <= 0.0f) ? ceil(value->x) : floor(value->x);
}
/*
  Round each of the three components toward zero.
*/
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->z=(value->z <= 0.0f) ? ceil(value->z) : floor(value->z);
  value->y=(value->y <= 0.0f) ? ceil(value->y) : floor(value->y);
  value->x=(value->x <= 0.0f) ? ceil(value->x) : floor(value->x);
}
/*
  CalculateColors() expands the two packed R5G6B5 endpoint colours of a
  DXT block and derives the two interpolated palette entries.  In
  four-colour mode (ignoreAlpha set, or c0 > c1) the extra entries are the
  1/3 and 2/3 blends; otherwise the block is in three-colour mode: one 1/2
  blend plus a black texel whose a[] entry (opacity) is 255.
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  ssize_t
    i;

  unsigned short
    endpoint[2];

  endpoint[0]=c0;
  endpoint[1]=c1;
  c->a[0]=c->a[1]=c->a[2]=c->a[3]=0;
  /* Expand both 5:6:5 endpoints to 8-bit channels. */
  for (i=0; i < 2; i++)
  {
    c->r[i]=(unsigned char) C565_red(endpoint[i]);
    c->g[i]=(unsigned char) C565_green(endpoint[i]);
    c->b[i]=(unsigned char) C565_blue(endpoint[i]);
  }
  if ((ignoreAlpha != MagickFalse) || (c0 > c1))
    {
      /* Four-colour mode: 2/3-1/3 and 1/3-2/3 blends. */
      c->r[2]=(unsigned char) ((2*c->r[0]+c->r[1])/3);
      c->g[2]=(unsigned char) ((2*c->g[0]+c->g[1])/3);
      c->b[2]=(unsigned char) ((2*c->b[0]+c->b[1])/3);
      c->r[3]=(unsigned char) ((c->r[0]+2*c->r[1])/3);
      c->g[3]=(unsigned char) ((c->g[0]+2*c->g[1])/3);
      c->b[3]=(unsigned char) ((c->b[0]+2*c->b[1])/3);
    }
  else
    {
      /* Three-colour mode: midpoint plus transparent black. */
      c->r[2]=(unsigned char) ((c->r[0]+c->r[1])/2);
      c->g[2]=(unsigned char) ((c->g[0]+c->g[1])/2);
      c->b[2]=(unsigned char) ((c->b[0]+c->b[1])/2);
      c->r[3]=c->g[3]=c->b[3]=0;
      c->a[3]=255;
    }
}
/*
  CompressAlpha() quantizes 16 alpha samples against the DXT5 alpha code
  palette built from [min,max] and returns the total squared error.
  'steps' selects the mode: 7 gives the eight-alpha mode (six
  interpolants, overwriting the implicit codes[6]/codes[7]); 5 gives the
  six-alpha mode where codes[6]=0 and codes[7]=255 survive.  An input of
  -1 marks a transparent sample; its index is set to 0 and it contributes
  no error.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  unsigned char
    codes[8];

  /*
    Build the code palette: the endpoints, then the interpolated steps.
  */
  codes[0]=(unsigned char) min;
  codes[1]=(unsigned char) max;
  codes[6]=0;
  codes[7]=255;
  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1]=(unsigned char) (((steps-i)*min+i*max)/steps);
  error=0;
  for (i=0; i < 16; i++)
  {
    if (alphas[i] == -1)
      {
        /* Transparent sample: index is irrelevant to the encoder. */
        indices[i]=0;
        continue;
      }
    value=(size_t) alphas[i];
    least=SIZE_MAX;
    index=0;
    for (j=0; j < 8; j++)
    {
      size_t
        dist;

      ssize_t
        diff;

      /*
        Use a signed difference: the previous unsigned subtraction
        underflowed whenever value < codes[j] and was only correct by
        modular-arithmetic accident.
      */
      diff=(ssize_t) value-(ssize_t) codes[j];
      dist=(size_t) (diff*diff);
      if (dist < least)
        {
          least=dist;
          index=j;
        }
    }
    indices[i]=(unsigned char) index;
    error+=least;
  }
  return(error);
}
/*
  CompressClusterFit() performs the squish-style "cluster fit" for a DXT
  colour block: the points are ordered along the principle axis, and every
  split of that ordering into four clusters (palette weights 0, 1/3, 2/3,
  1, bounded by indices i, j and k) is scored by solving a least-squares
  system for the two endpoint colours.  The lowest-error endpoints are
  stored in *start/*end and the per-texel palette codes in 'indices'.  Up
  to 8 reorderings along refined axes are attempted.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
  unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /*
    Constants for the least-squares solve; the w components carry the
    squared cluster weights.
  */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);
  /* RGB565 quantization grid (5, 6, 5 bits per channel). */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;
  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;
  /* Initial ordering along the principle axis (iteration 0). */
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /*
      i, j and k are the boundaries between the four weight clusters;
      part0/1/2/3 accumulate the weighted sums of each cluster.
    */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      VectorInit(part0,0.0f);
      for (ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);
      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }
        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 = remaining points (cluster with weight 1). */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);
          /*
            Solve the 2x2 least-squares system for endpoints a (start)
            and b (end) given the cluster sums.
          */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);
          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);
          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);
          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);
          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);
          /* Snap both candidate endpoints onto the RGB565 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);
          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);
          /* Weighted squared error of this clustering under 'metric'. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);
          error = e2.x + e2.y + e2.z;
          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* Re-check under the critical section (threads race). */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }
          if (k == count)
            break;
          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }
        if (j == count)
          break;
        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }
    /* Stop when an iteration fails to improve, or after 8 refinements. */
    if (bestIteration != iterationIndex)
      break;
    iterationIndex++;
    if (iterationIndex == 8)
      break;
    /* Re-order along the axis between the current best endpoints. */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
        iterationIndex) == MagickFalse)
      break;
  }
  /* Rebuild per-texel palette codes from the winning cluster boundaries. */
  o = order + (16*bestIteration);
  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;
  RemapIndices(map,unordered,indices);
}
/*
  CompressRangeFit() is the fast DXT endpoint search: project each colour
  point onto the principle axis, take the extreme points as endpoints,
  snap them onto the RGB565 grid, and then assign every point the closest
  of the four derived palette colours (distances weighted by 'metric').
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);
  /* RGB565 quantization grid (5, 6 and 5 bits per channel). */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  if (count > 0)
    {
      /* Endpoints: the points with extreme projections on the axis. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }
  /* Snap both endpoints to representable RGB565 colours. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);
  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);
  /* Palette: both endpoints plus the two 1/3-2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
  /* Map every point to its nearest palette entry. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;
      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }
    closest[i] = (unsigned char) bestj;
  }
  /* Expand the per-point codes back to the full 16-texel block. */
  RemapIndices(map, closest, indices);
}
/*
  ComputeEndPoints() picks optimal 5:6:5 endpoints for a block that holds
  a single colour, using the precomputed per-channel tables (lookup[0]:
  5-bit red, [1]: 6-bit green, [2]: 5-bit blue).  The loop tries both
  interpolation variants recorded in the tables (i selects sources[i]) and
  keeps the one with the smaller summed squared channel error; *index is
  the palette code (0 or 2) that reproduces the colour.
*/
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }
    /* Accept on ties too, so the later variant wins equal error. */
    if (error > maxError)
      continue;
    /* Convert the table's integer endpoints back to [0,1] channels. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;
    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;
    *index = (unsigned char) (2*i);
    maxError = error;
  }
}
/*
  ComputePrincipleComponent() estimates the dominant eigenvector of the
  symmetric 3x3 covariance matrix (supplied as its six upper-triangle
  values) with eight fixed power iterations, rescaling by the largest
  component after every step.  The result is the principle axis used to
  order the block's colours.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  register ssize_t
    i;

  /* Expand the packed upper triangle into three matrix rows. */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;
  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;
  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;
  VectorInit(v,1.0f);
  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v, accumulated row by row. */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;
    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;
    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;
    /* Rescale by the largest component to keep the iterate bounded. */
    a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z));
    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }
  VectorCopy43(v,principle);
}
/*
  ComputeWeightedCovariance() accumulates the weighted 3x3 covariance
  matrix of the colour points about their weighted centroid; the w
  component of each point is its weight.  Only the six upper-triangle
  values are stored in 'covariance' (xx, xy, xz, yy, yz, zz).
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  /* Weighted centroid of the points. */
  total = 0.0f;
  VectorInit3(centroid,0.0f);
  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }
  /* Guard against a zero (or denormal) total weight. */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }
  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;
  /* Accumulate the weighted outer products of the centred points. */
  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;
    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;
    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}
/*
  ConstructOrdering() sorts the points by their projection onto 'axis'
  (insertion sort) and stores the permutation in slot 'iteration' of the
  order table (16 entries per iteration).  It also precomputes the
  weighted points and their running sum for the cluster fit.  Returns
  MagickFalse when the new ordering duplicates one produced by an earlier
  iteration, signalling the caller to stop refining.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  register ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);
  /* Project every point onto the axis. */
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }
  /* Insertion-sort the projections, permuting the order in lock step. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;
      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }
  /* Reject the ordering if a previous iteration already produced it. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;
    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }
    if (same != MagickFalse)
      return MagickFalse;
  }
  /* Weighted points (w-scaled) in sorted order, plus their sum. */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;
    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }
  return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/*
  Return MagickTrue when the magick bytes carry the "DDS " signature.
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((char *) magick,"DDS ", 4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadDDSImage() parses the DDS header, selects a decoder for the pixel
  format, and decodes each sub-image (cube-map faces or volume slices
  become successive frames in the returned image list).
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse,
  volume=MagickFalse,
  read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /* Cube maps and volumes yield multiple frames. */
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  /* Position after the 4-byte magic plus the 124-byte header. */
  (void) SeekBlob(image, 128, SEEK_SET);
  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
   {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          /* 8-bit luminance decodes through the gray path of the RGB reader. */
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  if (num_images < 1)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /* Mipmaps are decoded only when dds:skip-mipmaps is explicitly false. */
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        /* Ping request: geometry only, skip all pixel decoding. */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
  ReadDDSInfo() reads the 124-byte DDS_HEADER (and embedded 32-byte
  DDS_PIXELFORMAT) that follows the 4-byte magic, validating the declared
  structure sizes and the mandatory width/height/pixel-format flags.
  Returns MagickFalse on a malformed header.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);
  /* Check header field */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;
  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);
  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;
  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
  /* Read pixel format structure */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;
  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
  return MagickTrue;
}
/*
  SetDXT1Pixels() writes one decoded 4x4 DXT1 colour block into the pixel
  patch at (x,y), clipping against the image edges; 'bits' holds the 16
  2-bit palette codes.  Returns MagickFalse when a texel carries non-zero
  opacity (three-colour transparent mode) while the image has no alpha
  channel, so the caller can enable alpha and decode the block again.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  register ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      /* q only covers in-bounds texels, so advance it only for those. */
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          /* Two palette bits per texel, row-major within the block. */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}
/*
  ReadMipmaps() appends each mipmap level as a new image in the list,
  decoding it with the supplied per-format pixel decoder.  Levels are
  read, halving dimensions each time, until mipmapcount is exhausted or a
  1x1 level has been decoded.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Only read mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(MagickFalse);
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}
/*
  ReadDXT1Pixels() decodes DXT1 (BC1) data: each 64-bit block stores two
  RGB565 endpoint colours plus 32 bits of 2-bit palette codes for a 4x4
  texel patch.  If a block uses the transparent three-colour mode while
  the image has no alpha channel, the image's alpha is reset and the block
  is decoded a second time.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);
      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the top-level DXT1 (BC1) image, then either decode the mipmap
  chain into the image list or skip past it (8 bytes per block).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,8,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
}
/*
  ReadDXT3Pixels() decodes DXT3 (BC2) data: each 128-bit block holds 64
  bits of explicit 4-bit alpha followed by a 64-bit DXT colour block for a
  4x4 texel patch.
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes) */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /*
            Clip against the image: x+i indexes a column and y+j a row.
            (The previous test compared against the transposed dimensions,
            mis-clipping edge blocks of non-square images.)
          */
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the top-level DXT3 (BC2) image, then either decode the mipmap
  chain into the image list or skip past it (16 bytes per block).
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
}
/*
  ReadDXT5Pixels() decodes DXT5 (BC3) data: each 128-bit block holds two
  8-bit alpha endpoints plus 48 bits of 3-bit alpha codes, followed by a
  64-bit DXT colour block, covering a 4x4 texel patch.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): 2 endpoints + 48 bits of codes. */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /* Extract alpha value */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                /* Eight-alpha mode: six interpolated values. */
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                /* Six-alpha mode: four interpolants plus explicit 0/255. */
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the top-level DXT5 (BC3) image, then either decode the mipmap
  chain as well or skip past it in the blob.
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=ReadDXT5Pixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));  /* 16 bytes/block */
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
}
/*
  Read an uncompressed DDS payload into the image one scanline at a time.
  The on-disk pixel layout is selected by dds_info->pixelformat.rgb_bitcount:
    8  - one gray byte per pixel
    16 - little-endian 5:6:5 RGB packed short
    24 - B,G,R byte triples
    32 - B,G,R triples followed by one byte that is read and discarded
  Returns MagickFalse on a pixel-cache failure or premature end of blob.
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    x, y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /*
            5:6:5 decode: shift each field down to the low bits (the
            left-shift first discards the higher fields), then expand the
            5- or 6-bit value to the 0..255 range before scaling.
          */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* 24/32-bit data is stored in B,G,R order */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);  /* discard unused fourth byte */
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Validate the uncompressed-RGB pixel format, decode the top-level image,
  then decode or skip the mipmap chain.  Only the 5:6:5 bit layout is
  accepted for 16-bit data; 8-bit data is treated as grayscale.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType,exception);
  else
    if ((dds_info->pixelformat.rgb_bitcount == 16) &&
        (IsBitMask(dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000) == MagickFalse))
      ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
        image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,3,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
    exception));
}
/*
  Read an uncompressed RGBA DDS payload into the image.  For 16-bit data the
  channel masks select one of three supported layouts:
    1:5:5:5 (alphaBits=1) - one alpha bit, 5 bits per color channel
    8:8     (alphaBits=2) - luminance + alpha, image becomes gray+alpha
    4:4:4:4 (alphaBits=4) - 4 bits per channel
  Any other 16-bit mask combination is rejected.  24/32-bit data is read as
  B,G,R,A byte quadruples.  Returns MagickFalse on a pixel-cache failure or
  premature end of blob.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* 1:5:5:5 — top bit is a binary alpha flag */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* luminance (low byte) + alpha (high byte) */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* 4:4:4:4 — expand each nibble to 0..255 */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else
        {
          /* 32-bit data is stored B,G,R,A */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the top-level uncompressed RGBA image, then decode or skip its
  mipmap chain.
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=ReadUncompressedRGBAPixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,4,exception));  /* 4 bytes/pixel */
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
/*
  Register the DDS coder under its three aliases (DDS, DXT1, DXT5).  Each
  alias shares the same reader, writer, and magic-detection handler, and is
  flagged as requiring a seekable input stream.
*/
ModuleExport size_t RegisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  MagickInfo
    *entry;

  ssize_t
    i;

  for (i=0; i < (ssize_t) (sizeof(aliases)/sizeof(aliases[0])); i++)
  {
    entry=AcquireMagickInfo("DDS",aliases[i],"Microsoft DirectDraw Surface");
    entry->decoder=(DecodeImageHandler *) ReadDDSImage;
    entry->encoder=(EncodeImageHandler *) WriteDDSImage;
    entry->magick=(IsImageFormatHandler *) IsDDS;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
  Translate the 16 per-texel palette indices of a 4x4 block through `map`.
  A map entry of -1 marks a texel with no source color; it becomes palette
  index 3 (the transparent/black entry in DXT encoding).
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    n;

  for (n=0; n < 16; n++)
    target[n]=(map[n] == -1) ? (unsigned char) 3 : source[map[n]];
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
/*
  Skip the mipmap images for compressed (DXTn) dds files.
  texel_size is the byte size of one 4x4 compressed block (8 for DXT1,
  16 for DXT3/DXT5).  Returns MagickFalse only when the blob already hit
  EOF before skipping; seek failures simply stop the skip loop.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* DXTn stores 4x4 blocks; round dimensions up to whole blocks */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files.
  pixel_size is the byte size of one pixel (3 for RGB, 4 for RGBA).
  Returns MagickFalse only when the blob already hit EOF before skipping;
  seek failures simply stop the skip loop.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
/*
  Emit the 8-byte alpha half of a DXT5 block: two endpoint bytes followed by
  16 3-bit indices packed into 6 bytes.  Both the 5-interpolant mode
  (endpoints + 0/255 specials) and the 7-interpolant mode are tried and the
  one with the smaller compression error wins.
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  register ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
  {
    /*
      The 7-interpolant fit is better: translate its indices into the
      5-mode index numbering (which swaps the two endpoints) and swap the
      endpoints themselves so a single writer below handles both modes.
    */
    for (i=0; i < 16; i++)
    {
      unsigned char
        index;

      index = indices7[i];
      if( index == 0 )
        indices5[i] = 1;
      else if (index == 1)
        indices5[i] = 0;
      else
        indices5[i] = 9 - index;
    }
    min5 = max7;
    max5 = min7;
  }
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  /* Pack 16 x 3-bit indices as two little-endian 24-bit groups */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  Compress one 4x4 block's distinct colors to a DXT color block and write it.
  The principal component of the weighted color covariance guides either a
  fast range fit or the slower cluster fit (cluster fit requires at least
  one color, hence the count==0 fallback).
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);
  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);
  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
  WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteBMPImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
/*
  Write `image` as a DDS file.  Compression defaults to DXT5 (DXT1 when the
  image has no alpha or the magick/option requests it); "dds:compression"
  may select DXT1 or "none" (uncompressed RGB).  Cluster fit and
  alpha-weighting are opt-in via "dds:cluster-fit" / "dds:weight-by-alpha".
  The mipmap count comes either from the image list ("dds:mipmaps" =
  "fromlist"), from an explicit count, or is derived for power-of-two
  dimensions.
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    fromlist,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /* Choose pixel format and compression */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }
  /* Encoder tuning options (only meaningful for DXTn output) */
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }
  /* Determine the mipmap count */
  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;

          /* Use the trailing images in the list as pre-built mipmaps */
          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }
  /* Auto-generate mipmaps only for power-of-two dimensions */
  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      maxMipmaps=SIZE_MAX;
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);
      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }
  WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);
  if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
      mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    return(MagickFalse);
  (void) CloseBlob(image);
  return(MagickTrue);
}
/*
  Emit the 128-byte DDS header: magic, DDS_HEADER (124 bytes) and the
  embedded DDS_PIXELFORMAT.  For FourCC (DXTn) output the linear size of
  the top image is stored and the RGB masks are zero; for uncompressed
  output the pitch and 8-bit-per-channel BGR(A) masks are stored.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];

  register ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;
  if (format == DDPF_FOURCC)
    flags=flags | DDSD_LINEARSIZE;
  else
    flags=flags | DDSD_PITCH;
  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }
  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;
  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);  /* dwSize of DDS_HEADER */
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);
  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }
  (void) WriteBlobLSBLong(image,0x00);  /* dwDepth (no volume texture) */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);  /* incl. base */
  (void) ResetMagickMemory(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);  /* dwReserved1 */
  (void) WriteBlobLSBLong(image,32);  /* dwSize of DDS_PIXELFORMAT */
  (void) WriteBlobLSBLong(image,format);
  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++) // bitcount / masks
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00);  /* no FourCC */
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          (void) WriteBlobLSBLong(image,32);  /* bit count + BGRA masks */
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          (void) WriteBlobLSBLong(image,24);  /* bit count + BGR masks */
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }
  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++) // ddscaps2 + reserved region
    (void) WriteBlobLSBLong(image,0x00);
}
/*
  Encode the image as DXT1 or DXT5 blocks.  Each 4x4 tile is gathered into
  a list of distinct colors (weighted by occurrence, optionally by alpha),
  the DXT5 alpha block is written first when applicable, and the color
  block is produced by a single-color fast path or the range/cluster
  fitter.  Edge tiles narrower than 4 pixels are handled via the
  columns/rows clamping.
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  register ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  register const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* Clamp the tile to the image edge */
      if (x + columns >= image->columns)
        columns = image->columns - x;
      if (y + rows >= image->rows)
        rows = image->rows - y;
      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* -1 marks texels outside the image / colors not yet assigned */
      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }
      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;
          if (compression == FOURCC_DXT5)
            {
              /* Track endpoint candidates for both DXT5 alpha modes:
                 7-mode uses the true min/max, 5-mode excludes 0 and 255
                 (they are encoded as specials) */
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }
          alphas[4*by + bx] = (size_t)alpha;
          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);
          /* Merge duplicate colors, accumulating their weight */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }
          if (match != MagickFalse)
            continue;
          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }
      /* The fitters expect sqrt-ed weights */
      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);
      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);
      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}
/*
  Dispatch one image's pixel payload to the DXTn (FourCC) or the
  uncompressed writer, depending on the selected pixel format.
*/
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  if (pixelFormat != DDPF_FOURCC)
    WriteUncompressed(image,exception);
  else
    WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
/*
  Round `value` to the nearest integer and clamp it to [0, limit].

  Fixed: the original stored the rounded value in a size_t and then tested
  `result < 0.0f` — a negative float cast through int wraps to a huge
  unsigned value, so the zero-clamp never fired and inputs below -1
  incorrectly returned `limit`.  Clamp on the float before converting.
*/
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  if (value < 0.0f)
    return(0);
  if (value >= (float) limit)
    return(limit);
  return((size_t) (value + 0.5f));
}
/*
  Quantize a normalized RGB vector (components in [0,1]) to a packed
  5:6:5 16-bit color value.
*/
static inline size_t ColorTo565(const DDSVector3 point)
{
  size_t
    blue,
    green,
    red;

  red=ClampToLimit(31.0f*point.x,31);
  green=ClampToLimit(63.0f*point.y,63);
  blue=ClampToLimit(31.0f*point.z,31);
  return((red << 11) | (green << 5) | blue);
}
/*
  Write a DXT color block: two little-endian 5:6:5 endpoints followed by
  16 2-bit palette indices.  DXT requires the first endpoint to be the
  numerically larger 565 value, so when a < b the endpoints are swapped
  and every index has its low bit flipped to compensate; equal endpoints
  collapse all indices to 0.
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  register ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  a = ColorTo565(start);
  b = ColorTo565(end);
  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }
  if( a < b )
    Swap(a,b);
  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));
  /* Four indices per byte, lowest bits first */
  for (i=0; i<4; i++)
  {
    ind = remapped + 4*i;
    (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
      (ind[3] << 6));
  }
}
/*
  Write the mipmap chain after the base image.  Mipmaps come either from
  the trailing images of the list (fromlist) — whose dimensions must match
  the expected halved sizes — or from successive resizes of the base image
  ("dds:fast-mipmaps" resizes each level from the previous one instead of
  from the base).  Each mipmap temporarily shares the output blob of the
  base image.  Returns MagickFalse on a resize failure or size mismatch.

  Fixed: the WriteImageData() call passed weightByAlpha and clusterFit in
  swapped order relative to the declared parameters
  (image,pixelFormat,compression,clusterFit,weightByAlpha,exception), so
  mipmaps were encoded with the two options exchanged.
*/
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
  const size_t pixelFormat,const size_t compression,const size_t mipmaps,
  const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *mipmap_image,
    *resize_image;

  MagickBooleanType
    fast_mipmaps,
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  columns=DIV2(image->columns);
  rows=DIV2(image->rows);
  option=GetImageOption(image_info,"dds:fast-mipmaps");
  fast_mipmaps=IsStringTrue(option);
  mipmap_image=image;
  resize_image=image;
  status=MagickTrue;
  for (i=0; i < (ssize_t) mipmaps; i++)
  {
    if (fromlist == MagickFalse)
      {
        mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
          exception);
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
      }
    else
      {
        /* Pre-built mipmaps must match the expected halved dimensions */
        mipmap_image=mipmap_image->next;
        if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
          ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
            image->filename);
      }
    /* Redirect the mipmap's blob to the base image's output stream */
    DestroyBlob(mipmap_image);
    mipmap_image->blob=ReferenceBlob(image->blob);
    WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
    if (fromlist == MagickFalse)
      {
        if (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* Keep this level as the source for the next resize */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }
    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);
  return(status);
}
/*
  Fast path for a 4x4 tile containing a single distinct color: look up the
  optimal DXT endpoints for that color in the precomputed table, replicate
  the resulting palette index over the block, and write the color block.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start,
    end;

  register ssize_t
    i;

  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];

  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
  for (i=0; i< 16; i++)
    indexes[i]=index;
  /* Route texels with no source color (map[i] == -1) to index 3 */
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}
/*
  Write the image's pixels uncompressed, scanline by scanline, in B,G,R
  byte order with a trailing alpha byte when the image has an alpha
  channel.  Stops early if the pixel cache cannot supply a scanline.
*/
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  const Quantum
    *pixels;

  ssize_t
    column,
    row;

  for (row=0; row < (ssize_t) image->rows; row++)
  {
    pixels=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (pixels == (const Quantum *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,
        pixels)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,
        pixels)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,
        pixels)));
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,
          pixels)));
      pixels+=GetPixelChannels(image);
    }
  }
}
|
openmp.c | // Copyright 2020 ETH Zurich and University of Bologna.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
#include "dm.h"
#include "encoding.h"
#include "eu.h"
#include "omp.h"
#include "printf.h"
#include "snrt.h"
#define AXPY_N 64
#define NUMTHREADS 8
// Test output printf
#define tprintf(...) printf(__VA_ARGS__)
// #define tprintf(...) while (0)
// Trace printf for debugging
// #define ttprintf(...) printf(__VA_ARGS__)
#define ttprintf(...) while (0)
volatile static uint32_t sum = 0;
/*
 * AXPY over AXPY_N doubles using an OpenMP static schedule with SSR data
 * streamers: each of the NUMTHREADS threads streams its contiguous chunk of
 * x and y through SSR lanes 0/1 and writes the result back through lane 2.
 * Returns 0 on success, 1 if any element deviates from the reference.
 *
 * Fixed: the SSR base addresses computed the per-thread offset with a
 * hard-coded `AXPY_N / 8`; this only matched the chunk length
 * (AXPY_N / NUMTHREADS) because NUMTHREADS happens to be 8.  Use the
 * macro so the offsets stay consistent if NUMTHREADS changes.
 */
unsigned __attribute__((noinline)) static_schedule(void) {
    static double *data_x, *data_y, data_a;
    data_x = snrt_l1alloc(sizeof(double) * AXPY_N);
    data_y = snrt_l1alloc(sizeof(double) * AXPY_N);

    // Init data
    data_a = 10.0;
    for (unsigned i = 0; i < AXPY_N; i++) {
        data_x[i] = (double)(i);
        data_y[i] = (double)(i + 1);
    }

    // compute
#pragma omp parallel firstprivate(data_a, data_x, data_y)
    {
        // DM, rep, bound, stride, data
        // Each thread streams its own AXPY_N/NUMTHREADS-element chunk.
        __builtin_ssr_setup_1d_r(
            0, 0, AXPY_N / NUMTHREADS - 1, sizeof(double),
            &data_x[AXPY_N / NUMTHREADS * omp_get_thread_num()]);
        __builtin_ssr_setup_1d_r(
            1, 0, AXPY_N / NUMTHREADS - 1, sizeof(double),
            &data_y[AXPY_N / NUMTHREADS * omp_get_thread_num()]);
        __builtin_ssr_setup_1d_w(
            2, 0, AXPY_N / NUMTHREADS - 1, sizeof(double),
            &data_y[AXPY_N / NUMTHREADS * omp_get_thread_num()]);
        __builtin_ssr_enable();
#pragma omp for schedule(static)
        for (unsigned i = 0; i < AXPY_N; i++) {
            // data_y[i] = data_a * data_x[i] + data_y[i];
            // via SSR lanes: pop x (0) and y (1), push result to y (2)
            __builtin_ssr_push(
                2, data_a * __builtin_ssr_pop(0) + __builtin_ssr_pop(1));
        }
        __builtin_ssr_disable();
    }

    // check data against the scalar reference
    unsigned errs = 0;
    double gold;
    for (unsigned i = 0; i < AXPY_N; i++) {
        gold = 10.0 * (double)(i) + (double)(i + 1);
        if ((gold - data_y[i]) * (gold - data_y[i]) > 0.01) errs++;
    }
    if (errs) tprintf("Error [static_schedule]: %d mismatches\n", errs);
    return errs ? 1 : 0;
}
/*
 * Parallel-region launch test: every thread atomically adds 10 to a shared
 * counter; success means exactly 8 threads entered the region (sum == 80).
 * NOTE(review): the function name keeps the historical "paralell" spelling
 * because main() calls it by that name.
 */
unsigned __attribute__((noinline)) paralell_section(void) {
    // Scratch instruction counter around region entry; `tx` is shared by
    // default inside the region, so the per-thread update races — it is
    // never read afterwards, presumably kept only for manual tracing.
    unsigned tx = read_csr(minstret);
    static volatile uint32_t sum = 0;

    // the following code is executed by all harts
#pragma omp parallel
    {
        tx = read_csr(minstret) - tx;
        __atomic_add_fetch(&sum, 10, __ATOMIC_RELAXED);
    }

    // Expect one contribution from each of the 8 worker threads
    return sum != 8 * 10;
}
#define DATASIZE 4 * 1024
#define TILESIZE (DATASIZE / 4)
#define NTHREADS 8
#include "data.h"
/*
 * Double-buffered AXPY over DATASIZE doubles: thread 0 drives the DMA engine
 * to stream x/y tiles between L3 and two alternating L1 buffer halves while
 * all NTHREADS threads compute on the other half using SSR streamers and an
 * frep-accelerated fmadd loop.  Buffer half selection: tile/TILESIZE parity
 * picks the compute half, the opposite parity the DMA half.
 * Result verification is compiled out; always returns 0.
 */
unsigned __attribute__((noinline)) double_buffering(void) {
    static double *bufx, *bufy, *x, *y;
    static double a;
    bufx = snrt_l1alloc(sizeof(double) * 2 * TILESIZE);
    bufy = snrt_l1alloc(sizeof(double) * 2 * TILESIZE);
    x = axpy_4096_x;
    y = axpy_4096_y;
    a = axpy_4096_a;

#pragma omp parallel firstprivate(bufx, bufy, x, y, a)
    {
        int tile;
        int thread_id = omp_get_thread_num();

        // first copy-in: prime buffer half 0 before the pipeline starts
        if (thread_id == 0) {
            ttprintf("copy-in t: %d\n", 0);
            dm_memcpy_async((void *)bufx, (void *)x, sizeof(double) * TILESIZE);
            dm_memcpy_async((void *)bufy, (void *)y, sizeof(double) * TILESIZE);
            dm_wait();
        }
#pragma omp barrier
        for (tile = 0; tile < DATASIZE; tile += TILESIZE) {
            // copy: thread 0 queues DMA on the half not being computed on
            if (thread_id == 0) {
                // copy-out: previous tile's result back to L3
                if (tile > 0) {
                    ttprintf("copy-out t: %d\n", tile);
                    dm_memcpy_async(
                        (void *)&x[tile - TILESIZE],
                        (void *)&bufx[TILESIZE * ((tile / TILESIZE + 1) % 2)],
                        sizeof(double) * TILESIZE);
                }
                // copy-in: next tile's operands into the same half
                if (tile < DATASIZE - TILESIZE) {
                    ttprintf("copy-in t: %d\n", tile);
                    dm_memcpy_async(
                        (void *)&bufx[TILESIZE * ((tile / TILESIZE + 1) % 2)],
                        (void *)&x[tile + TILESIZE], sizeof(double) * TILESIZE);
                    dm_memcpy_async(
                        (void *)&bufy[TILESIZE * ((tile / TILESIZE + 1) % 2)],
                        (void *)&y[tile + TILESIZE], sizeof(double) * TILESIZE);
                }
                dm_start();
            }

            // compute on the current half; each thread takes a contiguous
            // TILESIZE/NTHREADS slice via SSR lanes 0/1 (read) and 2 (write)
            // if (thread_id == 0)
            //     for (int i = 0; i < TILESIZE; ++i)
            //         tprintf(" %3d x %3.2f y %3.2f\n", i,
            //                 bufx[TILESIZE * ((tile / TILESIZE) % 2) + i],
            //                 bufy[TILESIZE * ((tile / TILESIZE) % 2) + i]);
            __builtin_ssr_setup_1d_r(0, 0, TILESIZE / NTHREADS - 1,
                                     sizeof(double),
                                     &bufx[TILESIZE * ((tile / TILESIZE) % 2) +
                                           thread_id * TILESIZE / NTHREADS]);
            __builtin_ssr_setup_1d_r(1, 0, TILESIZE / NTHREADS - 1,
                                     sizeof(double),
                                     &bufy[TILESIZE * ((tile / TILESIZE) % 2) +
                                           thread_id * TILESIZE / NTHREADS]);
            __builtin_ssr_setup_1d_w(2, 0, TILESIZE / NTHREADS - 1,
                                     sizeof(double),
                                     &bufx[TILESIZE * ((tile / TILESIZE) % 2) +
                                           thread_id * TILESIZE / NTHREADS]);
            __builtin_ssr_enable();
            asm volatile(
                // Computation: hardware-repeated fmadd, ft0/ft1 pop the
                // read streams, ft2 pushes the write stream
                "frep.o %[ldec], 1, 0, 0b0000 \n"
                "fmadd.d ft2, %[a], ft0, ft1 \n" ::[a] "fr"(a),
                [ ldec ] "r"(TILESIZE / NTHREADS - 1)
                : "memory", "ft0", "ft1", "ft2");
            __builtin_ssr_barrier(2);
            __builtin_ssr_disable();

            // copy barrier: wait for this round's DMA before swapping halves
            if (thread_id == 0) dm_wait();
#pragma omp barrier
        }
        // last copy-out (tile == DATASIZE here, so this flushes the final tile)
        if (thread_id == 0) {
            dm_memcpy_async(
                (void *)&x[tile - TILESIZE],
                (void *)&bufx[TILESIZE * ((tile / TILESIZE + 1) % 2)],
                sizeof(double) * TILESIZE);
            dm_wait();
        }
    }

    // Verify result
    // double mse = 0.0, gold;
    // for (int i = 0; i < DATASIZE; ++i) {
    //     gold = axpy_4096_g[i];
    //     mse += (gold - x[i]) * (gold - x[i]);
    // }
    // mse = mse * 1.0 * (1.0 / (double)(DATASIZE));
    // tprintf("mse = %f\n", mse);
    // if (mse > 0.0001) return 1;

    return 0;
}
int main() {
unsigned core_idx = snrt_cluster_core_idx();
unsigned core_num = snrt_cluster_core_num();
unsigned err = 0;
__snrt_omp_bootstrap(core_idx);
tprintf("Static schedule test\n");
err |= static_schedule() << 0;
OMP_PROF(omp_print_prof());
tprintf("Launch overhead test\n");
err |= paralell_section() << 1;
OMP_PROF(omp_print_prof());
tprintf("Double buffering test\n");
err |= double_buffering() << 2;
OMP_PROF(omp_print_prof());
// exit
__snrt_omp_destroy(core_idx);
return err;
}
|
tree.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
* \param track_branch_features Whether to keep track of ancestors of leaf nodes
*/
explicit Tree(int max_leaves, bool track_branch_features);
/*!
* \brief Constructor, from a string
* \param str Model string
* \param used_len used count of str
*/
Tree(const char* str, size_t* used_len);
~Tree();
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split
* \param threshold_double Threshold on feature value
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \param missing_type missing type
* \param default_left default direction for missing value
* \return The index of new leaf.
*/
int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
double threshold_double, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight,
float gain, MissingType missing_type, bool default_left);
/*!
* \brief Performing a split on tree leaves, with categorical feature
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split, use bitset to represent
* \param num_threshold_bin size of threshold_bin
* \param threshold Thresholds of real feature value, use bitset to represent
* \param num_threshold size of threshold
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
const uint32_t* threshold, int num_threshold, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type);
/*! \brief Get the output (prediction value) of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf; near-zero values are snapped to exactly 0 */
inline void SetLeafOutput(int leaf, double output) {
  leaf_value_[leaf] = MaybeRoundToZero(output);
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
data_size_t num_data,
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data, double* score) const;
/*!
* \brief Get upper bound leaf value of this tree model
*/
double GetUpperBoundValue() const;
/*!
* \brief Get lower bound leaf value of this tree model
*/
double GetLowerBoundValue() const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
inline int PredictLeafIndex(const double* feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
inline void PredictContrib(const double* feature_values, int num_features, double* output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get parent (internal node index) of specific leaf*/
inline int leaf_parent(int leaf_idx) const {return leaf_parent_[leaf_idx]; }
/*! \brief Get feature of specific split (original feature index)*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
/*! \brief Get features on leaf's branch (original indices)*/
inline std::vector<int> branch_features(int leaf) const { return branch_features_[leaf]; }
/*! \brief Get gain of specific split*/
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
/*! \brief Get the output value stored on a non-leaf node*/
inline double internal_value(int node_idx) const {
  return internal_value_[node_idx];
}
/*! \brief True iff the split at node_idx is numerical (categorical flag bit unset)*/
inline bool IsNumericalSplit(int node_idx) const {
  return !GetDecisionType(decision_type_[node_idx], kCategoricalMask);
}
/*! \brief Left child of a non-leaf node; a negative value encodes a leaf as ~leaf_idx*/
inline int left_child(int node_idx) const { return left_child_[node_idx]; }
/*! \brief Right child of a non-leaf node; a negative value encodes a leaf as ~leaf_idx*/
inline int right_child(int node_idx) const { return right_child_[node_idx]; }
/*! \brief Split feature of a node, index after removing unused features*/
inline int split_feature_inner(int node_idx) const {
  return split_feature_inner_[node_idx];
}
/*! \brief Bin-space threshold of a node's split*/
inline uint32_t threshold_in_bin(int node_idx) const {
  return threshold_in_bin_[node_idx];
}
/*! \brief Get the number of data points that fall at or below this node*/
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
/*!
 * \brief Shrinkage for the tree's output
 * shrinkage rate (a.k.a learning rate) is used to tune the training process
 * \param rate The factor of shrinkage
 */
inline void Shrinkage(double rate) {
// There are num_leaves_ leaf values but only num_leaves_ - 1 internal nodes,
// so the loop scales leaf/internal pairs and the last leaf is handled alone.
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
  for (int i = 0; i < num_leaves_ - 1; ++i) {
    leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate);
    internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate);
  }
  leaf_value_[num_leaves_ - 1] =
      MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate);
  // accumulate the total shrinkage applied so far
  shrinkage_ *= rate;
}
/*! \brief Get the accumulated shrinkage factor applied to this tree */
inline double shrinkage() const { return shrinkage_; }
/*!
 * \brief Add a constant offset to every leaf and internal-node output.
 * \param val The value to add
 */
inline void AddBias(double val) {
// same indexing scheme as Shrinkage(): num_leaves_ leaves, num_leaves_ - 1 internal nodes
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
  for (int i = 0; i < num_leaves_ - 1; ++i) {
    leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val);
    internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val);
  }
  leaf_value_[num_leaves_ - 1] =
      MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val);
  // force to 1.0
  shrinkage_ = 1.0f;
}
/*! \brief Collapse this tree to a single leaf that always outputs val */
inline void AsConstantTree(double val) {
  num_leaves_ = 1;
  shrinkage_ = 1.0f;
  leaf_value_[0] = val;
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool predict_leaf_index) const;
/*! \brief True iff fval lies within [-kZeroThreshold, kZeroThreshold] (NaN yields false) */
inline static bool IsZero(double fval) {
  return -kZeroThreshold <= fval && fval <= kZeroThreshold;
}
/*! \brief Snap near-zero values to exactly zero */
inline static double MaybeRoundToZero(double fval) {
  if (IsZero(fval)) {
    return 0;
  }
  return fval;
}
/*! \brief Test whether the flag bit(s) selected by mask are set in decision_type */
inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
  const int masked_bits = decision_type & mask;
  return masked_bits > 0;
}
/*! \brief Set (input=true) or clear (input=false) the flag bit(s) of mask in *decision_type */
inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
  if (input) {
    (*decision_type) |= mask;
  } else {
    // 127 - mask complements mask over the low 7 bits; note this AND also
    // forces bit 7 (the sign bit of the int8_t) to zero
    (*decision_type) &= (127 - mask);
  }
}
/*! \brief Missing-value type is stored in bits 2-3 of decision_type */
inline static int8_t GetMissingType(int8_t decision_type) {
  return (decision_type >> 2) & 3;
}
/*! \brief Store input in bits 2-3; keeps only bits 0-1 of the existing flags.
 *  NOTE(review): this clears any flag bits >= 4 as a side effect -- confirm
 *  no other flags live above bit 3. */
inline static void SetMissingType(int8_t* decision_type, int8_t input) {
  (*decision_type) &= 3;
  (*decision_type) |= (input << 2);
}
void RecomputeMaxDepth();
/*! \brief Index the next added leaf would receive (one past the last leaf) */
int NextLeafId() const { return num_leaves_; }
private:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
// Route a raw feature value through a numerical split node; returns the
// child index (negative values encode leaves as ~leaf_idx).
inline int NumericalDecision(double fval, int node) const {
  const uint8_t missing_type = GetMissingType(decision_type_[node]);
  // NaN is mapped to 0 unless this split was trained with NaN-type missing values
  if (std::isnan(fval) && missing_type != MissingType::NaN) {
    fval = 0.0f;
  }
  const bool is_missing =
      (missing_type == MissingType::Zero && IsZero(fval)) ||
      (missing_type == MissingType::NaN && std::isnan(fval));
  if (is_missing) {
    // missing values follow the stored default direction
    return GetDecisionType(decision_type_[node], kDefaultLeftMask)
               ? left_child_[node]
               : right_child_[node];
  }
  return fval <= threshold_[node] ? left_child_[node] : right_child_[node];
}
// Route a binned feature value through a numerical split node.
// default_bin / max_bin identify the bins that represent missing values.
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
  const uint8_t missing_type = GetMissingType(decision_type_[node]);
  const bool bin_is_missing =
      (missing_type == MissingType::Zero && fval == default_bin) ||
      (missing_type == MissingType::NaN && fval == max_bin);
  if (bin_is_missing) {
    // missing values follow the stored default direction
    return GetDecisionType(decision_type_[node], kDefaultLeftMask)
               ? left_child_[node]
               : right_child_[node];
  }
  return fval <= threshold_in_bin_[node] ? left_child_[node]
                                         : right_child_[node];
}
// Route a raw feature value through a categorical split node: go left iff the
// integer category is present in the node's bitset.
inline int CategoricalDecision(double fval, int node) const {
  const uint8_t missing_type = GetMissingType(decision_type_[node]);
  int int_fval = static_cast<int>(fval);
  if (int_fval < 0) {
    // negative categories always go right
    return right_child_[node];
  }
  if (std::isnan(fval)) {
    // NaN is always in the right
    if (missing_type == MissingType::NaN) {
      return right_child_[node];
    }
    int_fval = 0;
  }
  // threshold_ holds an index into the per-split bitset storage
  const int cat_idx = static_cast<int>(threshold_[node]);
  const uint32_t* bits = cat_threshold_.data() + cat_boundaries_[cat_idx];
  const int n_bits = cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx];
  return Common::FindInBitset(bits, n_bits, int_fval) ? left_child_[node]
                                                      : right_child_[node];
}
// Route a binned feature value through a categorical split node.
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
  const int cat_idx = static_cast<int>(threshold_in_bin_[node]);
  const uint32_t* bits =
      cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx];
  const int n_bits =
      cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx];
  if (Common::FindInBitset(bits, n_bits, fval)) {
    return left_child_[node];
  }
  return right_child_[node];
}
// Dispatch a raw feature value to the categorical or numerical decision,
// depending on the node's split-type flag.
inline int Decision(double fval, int node) const {
  const bool is_categorical =
      GetDecisionType(decision_type_[node], kCategoricalMask);
  return is_categorical ? CategoricalDecision(fval, node)
                        : NumericalDecision(fval, node);
}
// Bin-space counterpart of Decision().
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
  if (!GetDecisionType(decision_type_[node], kCategoricalMask)) {
    return NumericalDecisionInner(fval, node, default_bin, max_bin);
  }
  return CategoricalDecisionInner(fval, node);
}
inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
double ExpectedValue() const;
/*! \brief This is used fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement {
int feature_index;
double zero_fraction;
double one_fraction;
// note that pweight is included for convenience and is not tied with the other attributes,
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
int num_cat_;
std::vector<int> cat_boundaries_inner_;
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Store the information for categorical feature handle and missing value handle. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief weight of leaves */
std::vector<double> leaf_weight_;
/*! \brief DataCount of leaves */
std::vector<int> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief weight of non-leaf nodes */
std::vector<double> internal_weight_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
/*! \brief whether to keep track of ancestor nodes for each leaf (only needed when feature interactions are restricted) */
bool track_branch_features_;
/*! \brief Features on leaf's branch, original index */
std::vector<std::vector<int>> branch_features_;
double shrinkage_;
int max_depth_;
};
// Perform the bookkeeping for splitting `leaf`: convert it into internal node
// num_leaves_ - 1, keep `leaf` as the new left leaf, and create leaf
// num_leaves_ as the new right leaf. Child arrays store leaves as their
// bitwise complement (~leaf), so negative child values denote leaves.
// NOTE(review): the caller appears responsible for incrementing num_leaves_
// after this call -- confirm against the public Split() implementation.
inline void Tree::Split(int leaf, int feature, int real_feature,
                        double left_value, double right_value, int left_cnt, int right_cnt,
                        double left_weight, double right_weight, float gain) {
  int new_node_idx = num_leaves_ - 1;
  // update parent info
  int parent = leaf_parent_[leaf];
  if (parent >= 0) {
    // if cur node is left child
    if (left_child_[parent] == ~leaf) {
      left_child_[parent] = new_node_idx;
    } else {
      right_child_[parent] = new_node_idx;
    }
  }
  // add new node
  split_feature_inner_[new_node_idx] = feature;
  split_feature_[new_node_idx] = real_feature;
  split_gain_[new_node_idx] = gain;
  // add two new leaves
  left_child_[new_node_idx] = ~leaf;
  right_child_[new_node_idx] = ~num_leaves_;
  // update new leaves
  leaf_parent_[leaf] = new_node_idx;
  leaf_parent_[num_leaves_] = new_node_idx;
  // save current leaf value to internal node before change
  internal_weight_[new_node_idx] = leaf_weight_[leaf];
  internal_value_[new_node_idx] = leaf_value_[leaf];
  internal_count_[new_node_idx] = left_cnt + right_cnt;
  // NaN child outputs are replaced by 0 so the model stays finite
  leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
  leaf_weight_[leaf] = left_weight;
  leaf_count_[leaf] = left_cnt;
  leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
  leaf_weight_[num_leaves_] = right_weight;
  leaf_count_[num_leaves_] = right_cnt;
  // update leaf depth
  leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
  leaf_depth_[leaf]++;
  if (track_branch_features_) {
    // right leaf inherits the left leaf's branch; both gain this split's feature
    branch_features_[num_leaves_] = branch_features_[leaf];
    branch_features_[num_leaves_].push_back(split_feature_[new_node_idx]);
    branch_features_[leaf].push_back(split_feature_[new_node_idx]);
  }
}
// Predict the output value for one dense record.
inline double Tree::Predict(const double* feature_values) const {
  if (num_leaves_ <= 1) {
    // stump: single constant output
    return leaf_value_[0];
  }
  return LeafOutput(GetLeaf(feature_values));
}
// Predict using a sparse {feature index -> value} map.
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
  if (num_leaves_ <= 1) {
    return leaf_value_[0];
  }
  return LeafOutput(GetLeafByMap(feature_values));
}
// Index of the leaf this dense record falls into (0 for a stump).
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  return num_leaves_ > 1 ? GetLeaf(feature_values) : 0;
}
// Index of the leaf this sparse record falls into (0 for a stump).
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
  return num_leaves_ > 1 ? GetLeafByMap(feature_values) : 0;
}
// Compute SHAP feature contributions for one record: adds the tree's expected
// value into output[num_features] and per-feature contributions into
// output[0..num_features-1] via the TreeSHAP recursion.
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
  output[num_features] += ExpectedValue();
  // Run the recursion with preallocated space for the unique path data
  if (num_leaves_ > 1) {
    CHECK_GE(max_depth_, 0);
    // a path holds at most max_depth_ + 1 elements; the triangular-number
    // sizing provides scratch for every recursion depth at once
    const int max_path_len = max_depth_ + 1;
    std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
    TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
  }
}
// Refill leaf_depth_ by walking the whole tree from the root; used after a
// model is reloaded (leaf depths are not serialized).
inline void Tree::RecomputeLeafDepths(int node, int depth) {
  if (node == 0) {
    leaf_depth_.resize(num_leaves());
  }
  if (node < 0) {
    // a negative index encodes a leaf as ~leaf_idx
    leaf_depth_[~node] = depth;
    return;
  }
  RecomputeLeafDepths(left_child_[node], depth + 1);
  RecomputeLeafDepths(right_child_[node], depth + 1);
}
// Walk from the root to a leaf using this record's feature values.
// Internal nodes have indices >= 0; a leaf is reached when the index goes
// negative (encoded as ~leaf_idx).
inline int Tree::GetLeaf(const double* feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    while (node >= 0) {
      node = Decision(feature_values[split_feature_[node]], node);
    }
  } else {
    // tree has no categorical splits: skip the per-node split-type check
    while (node >= 0) {
      node = NumericalDecision(feature_values[split_feature_[node]], node);
    }
  }
  return ~node;
}
// Map-based variant of GetLeaf(); features absent from the map are treated
// as having value 0.0.
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    while (node >= 0) {
      node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
    }
  } else {
    // tree has no categorical splits: skip the per-node split-type check
    while (node >= 0) {
      node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
    }
  }
  return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
GB_binop__bget_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int16)
// C=scalar+B GB (_bind1st__bget_int16)
// C=scalar+B' GB (_bind1st_tran__bget_int16)
// C=A+scalar GB (_bind2nd__bget_int16)
// C=A'+scalar GB (_bind2nd_tran__bget_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_BITGET (aij, bij, int16_t, 16)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, int16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT16 || GxB_NO_BGET_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense; the loop body lives in the
// included template. Auto-generated: edit the Generator/ source, not here.
GrB_Info GB (_Cdense_ewise3_noaccum__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix; parallel work is
// described by B_ek_slicing. Auto-generated wrapper around the template.
GrB_Info GB (_Cdense_accumB__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__bget_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above already returned; harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B; the per-format loops are in the included
// add template, driven by the precomputed TaskList.
GrB_Info GB (_AaddB__bget_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces allocated/freed by the template via GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (method 01); loops in the included meta file.
GrB_Info GB (_AemultB_01__bget_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 1 for this operator, so flipxy selects which template
// instantiation (fmult(y,x) vs fmult(x,y)) is compiled in.
GrB_Info GB (_AemultB_02__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when the result C is held in bitmap form (C=A.*B, C<M>=A.*B, C<!M>=A.*B).
GrB_Info GB (_AemultB_bitmap__bget_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]) for every entry present in B; x is bound as the
// first operand of the bitget operator.
GrB_Info GB (_bind1st__bget_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries that are not present in B's bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y) for every entry present in A; y is bound as the
// second operand of the bitget operator.
GrB_Info GB (_bind2nd__bget_int16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries that are not present in A's bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, int16_t, 16) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__bget_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code that follows (generated pattern)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, int16_t, 16) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
actionAngleAdiabatic.c | /*
C code for the adiabatic approximation
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_roots.h>
#include <gsl/gsl_min.h>
#include <gsl/gsl_integration.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define CHUNKSIZE 10
//Potentials
#include <galpy_potentials.h>
#include <integrateFullOrbit.h>
#include <actionAngle.h>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
//Macros to export functions in DLL on different OS
#if defined(_WIN32)
#define EXPORT __declspec(dllexport)
#elif defined(__GNUC__)
#define EXPORT __attribute__((visibility("default")))
#else
// Just do nothing?
#define EXPORT
#endif
/*
Structure Declarations
*/
/* Argument bundle passed (via void*) to the radial-action integrands. */
struct JRAdiabaticArg{
  double ER;       // planar (radial) energy of the orbit
  double Lz22;     // presumably Lz^2 / 2 -- TODO confirm against the integrand
  int nargs;       // number of potentials in actionAngleArgs
  struct potentialArg * actionAngleArgs;  // parsed potential list
};
/* Argument bundle passed (via void*) to the vertical-action integrands. */
struct JzAdiabaticArg{
  double Ez;       // vertical energy of the orbit
  double R;        // cylindrical radius at which the vertical motion is evaluated
  int nargs;       // number of potentials in actionAngleArgs
  struct potentialArg * actionAngleArgs;  // parsed potential list
};
/*
Function Declarations
*/
EXPORT void actionAngleAdiabatic_RperiRapZmax(int,double *,double *,double *,double *,
double *,int,int *,double *,double,
double *,double *,double *,int *);
EXPORT void actionAngleAdiabatic_actions(int,double *,double *,double *,double *,
double *,int,int *,double *,double,
double *,double *,int *);
void calcJRAdiabatic(int,double *,double *,double *,double *,double *,
int,struct potentialArg *,int);
void calcJzAdiabatic(int,double *,double *,double *,double *,int,
struct potentialArg *,int);
void calcRapRperi(int,double *,double *,double *,double *,double *,
int,struct potentialArg *);
void calcZmax(int,double *,double *,double *,double *,int,
struct potentialArg *);
double JRAdiabaticIntegrandSquared(double,void *);
double JRAdiabaticIntegrand(double,void *);
double JzAdiabaticIntegrandSquared(double,void *);
double JzAdiabaticIntegrand(double,void *);
double evaluateVerticalPotentials(double, double,int, struct potentialArg *);
/*
Actual functions, inlines first
*/
// For each of the ndata phase-space points, compute the planar energy ER
// (in-plane potential + radial + tangential kinetic terms), the vertical
// energy Ez (vertical potential + vertical kinetic term), and the
// z-component of the angular momentum Lz = R * vT.
static inline void calcEREzL(int ndata,
			     double *R,
			     double *vR,
			     double *vT,
			     double *z,
			     double *vz,
			     double *ER,
			     double *Ez,
			     double *Lz,
			     int nargs,
			     struct potentialArg * actionAngleArgs){
  int ii;
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) private(ii)
  for (ii=0; ii < ndata; ii++){
    ER[ii]= evaluatePotentials(R[ii],0.,nargs,actionAngleArgs)
      + 0.5 * vR[ii] * vR[ii]
      + 0.5 * vT[ii] * vT[ii];
    Ez[ii]= evaluateVerticalPotentials(R[ii],z[ii],nargs,actionAngleArgs)
      + 0.5 * vz[ii] * vz[ii];
    Lz[ii]= R[ii] * vT[ii];
  }
}
/*
MAIN FUNCTIONS
*/
// Compute, for ndata phase-space points, the pericenter radius (rperi),
// apocenter radius (rap), and maximum vertical excursion (zmax) in the
// adiabatic approximation. The vertical action jz is folded, weighted by
// gamma, into an effective angular momentum before the radial turning
// points are found.
// NOTE(review): malloc return values are not checked, and *err is never
// written in this function -- confirm whether callers rely on err here.
void actionAngleAdiabatic_RperiRapZmax(int ndata,
				       double *R,
				       double *vR,
				       double *vT,
				       double *z,
				       double *vz,
				       int npot,
				       int * pot_type,
				       double * pot_args,
				       double gamma,
				       double *rperi,
				       double *rap,
				       double *zmax,
				       int * err){
  int ii;
  //Set up the potentials
  struct potentialArg * actionAngleArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,actionAngleArgs,&pot_type,&pot_args);
  //ER, Ez, Lz
  double *ER= (double *) malloc ( ndata * sizeof(double) );
  double *Ez= (double *) malloc ( ndata * sizeof(double) );
  double *Lz= (double *) malloc ( ndata * sizeof(double) );
  calcEREzL(ndata,R,vR,vT,z,vz,ER,Ez,Lz,npot,actionAngleArgs);
  //Vertical turning point and vertical action first (zmax, jz)
  double *jz= (double *) malloc ( ndata * sizeof(double) );
  calcZmax(ndata,zmax,z,R,Ez,npot,actionAngleArgs);
  calcJzAdiabatic(ndata,jz,zmax,R,Ez,npot,actionAngleArgs,10);
  //Adjust planar effective potential for gamma
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) private(ii)
  for (ii=0; ii < ndata; ii++){
    // effective |Lz| includes gamma * jz; ER becomes the effective radial
    // energy (swap the tangential kinetic term for the centrifugal term)
    *(Lz+ii)= fabs( *(Lz+ii) ) + gamma * *(jz+ii);
    *(ER+ii)+= 0.5 * *(Lz+ii) * *(Lz+ii) / *(R+ii) / *(R+ii)
      - 0.5 * *(vT+ii) * *(vT+ii);
  }
  calcRapRperi(ndata,rperi,rap,R,ER,Lz,npot,actionAngleArgs);
  //Cleanup
  free_potentialArgs(npot,actionAngleArgs);
  free(actionAngleArgs);
  free(ER);
  free(Ez);
  free(Lz);
  free(jz);
}
// Compute, for each of the ndata input phase-space points (R,vR,vT,z,vz),
// the radial action jr and the vertical action jz under the adiabatic
// approximation. rperi/rap/zmax are internal temporaries here (unlike in
// actionAngleAdiabatic_RperiRapZmax, where they are outputs).
//
// npot/pot_type/pot_args describe the potential (parsed by
// parse_leapFuncArgs_Full); gamma scales the vertical-action term added
// to |Lz| when building the planar effective potential. Failed orbits
// propagate as jr/jz = 9999.99 (see calcJRAdiabatic/calcJzAdiabatic).
// NOTE(review): err is never written in this function — confirm callers
// do not rely on it being set here.
void actionAngleAdiabatic_actions(int ndata,
                                  double *R,
                                  double *vR,
                                  double *vT,
                                  double *z,
                                  double *vz,
                                  int npot,
                                  int * pot_type,
                                  double * pot_args,
                                  double gamma,
                                  double *jr,
                                  double *jz,
                                  int * err){
  int ii;
  //Set up the potentials
  struct potentialArg * actionAngleArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,actionAngleArgs,&pot_type,&pot_args);
  //Planar energy ER, vertical energy Ez, and angular momentum Lz
  double *ER= (double *) malloc ( ndata * sizeof(double) );
  double *Ez= (double *) malloc ( ndata * sizeof(double) );
  double *Lz= (double *) malloc ( ndata * sizeof(double) );
  calcEREzL(ndata,R,vR,vT,z,vz,ER,Ez,Lz,npot,actionAngleArgs);
  //Vertical action first (needed for the gamma correction below)
  double *rperi= (double *) malloc ( ndata * sizeof(double) );
  double *rap= (double *) malloc ( ndata * sizeof(double) );
  double *zmax= (double *) malloc ( ndata * sizeof(double) );
  calcZmax(ndata,zmax,z,R,Ez,npot,actionAngleArgs);
  calcJzAdiabatic(ndata,jz,zmax,R,Ez,npot,actionAngleArgs,10);
  //Adjust planar effective potential for gamma: Lz -> |Lz| + gamma*jz,
  //then turn ER into the planar effective energy by adding the
  //centrifugal term Lz^2/(2 R^2) and removing the tangential kinetic
  //energy that calcEREzL folded into ER
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) private(ii)
  for (ii=0; ii < ndata; ii++){
    *(Lz+ii)= fabs( *(Lz+ii) ) + gamma * *(jz+ii);
    *(ER+ii)+= 0.5 * *(Lz+ii) * *(Lz+ii) / *(R+ii) / *(R+ii)
      - 0.5 * *(vT+ii) * *(vT+ii);
  }
  //Radial turning points, then the radial action between them
  calcRapRperi(ndata,rperi,rap,R,ER,Lz,npot,actionAngleArgs);
  calcJRAdiabatic(ndata,jr,rperi,rap,ER,Lz,npot,actionAngleArgs,10);
  //Clean up (jr/jz are caller-owned outputs)
  free_potentialArgs(npot,actionAngleArgs);
  free(actionAngleArgs);
  free(ER);
  free(Ez);
  free(Lz);
  free(rperi);
  free(rap);
  free(zmax);
}
// Compute the radial action for each point:
//   jr = (sqrt(2)/pi) * Integral_{rperi}^{rap} sqrt(ER - Phi(R,0) - Lz^2/(2R^2)) dR
// using fixed-order Gauss-Legendre quadrature with `order` nodes.
// Points whose turning points were flagged as failed (rperi or rap ==
// -9999.99) get jr = 9999.99; numerically circular orbits
// ((rap - rperi)/rap < 1e-6) get jr = 0.
void calcJRAdiabatic(int ndata,
                     double * jr,
                     double * rperi,
                     double * rap,
                     double * ER,
                     double * Lz,
                     int nargs,
                     struct potentialArg * actionAngleArgs,
                     int order){
  int ii, tid, nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  //One gsl_function and one parameter struct per thread, so threads
  //never write to shared integrand state
  gsl_function * JRInt= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct JRAdiabaticArg * params= (struct JRAdiabaticArg *) malloc ( nthreads * sizeof (struct JRAdiabaticArg) );
  for (tid=0; tid < nthreads; tid++){
    (params+tid)->nargs= nargs;
    (params+tid)->actionAngleArgs= actionAngleArgs;
  }
  //Setup integrator: the Gauss-Legendre table is read-only and shared
  gsl_integration_glfixed_table * T= gsl_integration_glfixed_table_alloc (order);
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) \
  private(tid,ii) \
  shared(jr,rperi,rap,JRInt,params,T,ER,Lz)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    //Turning points could not be found: flag the action as failed
    if ( *(rperi+ii) == -9999.99 || *(rap+ii) == -9999.99 ){
      *(jr+ii)= 9999.99;
      continue;
    }
    if ( (*(rap+ii) - *(rperi+ii)) / *(rap+ii) < 0.000001 ){//circular
      *(jr+ii) = 0.;
      continue;
    }
    //Setup function for this orbit
    (params+tid)->ER= *(ER+ii);
    (params+tid)->Lz22= 0.5 * *(Lz+ii) * *(Lz+ii);
    (JRInt+tid)->function = &JRAdiabaticIntegrand;
    (JRInt+tid)->params = params+tid;
    //Integrate (sqrt(2) factor comes from v_R = sqrt(2*(ER - Phi_eff)))
    *(jr+ii)= gsl_integration_glfixed (JRInt+tid,*(rperi+ii),*(rap+ii),T)
      * sqrt(2.) / M_PI;
  }
  free(JRInt);
  free(params);
  gsl_integration_glfixed_table_free ( T );
}
// Compute the vertical action for each point:
//   jz = (2*sqrt(2)/pi) * Integral_{0}^{zmax} sqrt(Ez - [Phi(R,z)-Phi(R,0)]) dz
// using fixed-order Gauss-Legendre quadrature with `order` nodes.
// Points whose zmax was flagged as failed (zmax == -9999.99) get
// jz = 9999.99; essentially planar orbits (zmax < 1e-6) get jz = 0.
void calcJzAdiabatic(int ndata,
                     double * jz,
                     double * zmax,
                     double * R,
                     double * Ez,
                     int nargs,
                     struct potentialArg * actionAngleArgs,
                     int order){
  int ii, tid, nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  //One gsl_function and one parameter struct per thread, so threads
  //never write to shared integrand state
  gsl_function * JzInt= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct JzAdiabaticArg * params= (struct JzAdiabaticArg *) malloc ( nthreads * sizeof (struct JzAdiabaticArg) );
  for (tid=0; tid < nthreads; tid++){
    (params+tid)->nargs= nargs;
    (params+tid)->actionAngleArgs= actionAngleArgs;
  }
  //Setup integrator: the Gauss-Legendre table is read-only and shared
  gsl_integration_glfixed_table * T= gsl_integration_glfixed_table_alloc (order);
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) \
  private(tid,ii) \
  shared(jz,zmax,JzInt,params,T,Ez,R)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    //zmax could not be found: flag the action as failed
    if ( *(zmax+ii) == -9999.99 ){
      *(jz+ii)= 9999.99;
      continue;
    }
    if ( *(zmax+ii) < 0.000001 ){//circular
      *(jz+ii) = 0.;
      continue;
    }
    //Setup function for this orbit
    (params+tid)->Ez= *(Ez+ii);
    (params+tid)->R= *(R+ii);
    (JzInt+tid)->function = &JzAdiabaticIntegrand;
    (JzInt+tid)->params = params+tid;
    //Integrate over half the vertical oscillation (hence the factor 2)
    *(jz+ii)= gsl_integration_glfixed (JzInt+tid,0.,*(zmax+ii),T)
      * 2 * sqrt(2.) / M_PI;
  }
  free(JzInt);
  free(params);
  gsl_integration_glfixed_table_free ( T );
}
// Find the pericenter (rperi) and apocenter (rap) of each orbit as the
// two roots of
//   f(r) = ER - Phi(r,0) - Lz^2/(2 r^2)   (= JRAdiabaticIntegrandSquared)
// which is positive between the turning points of a bound orbit.
// Brent's method is used with one solver per OpenMP thread.
// Conventions:
//   * a pericenter that cannot be bracketed above r = 1e-9 is set to 0;
//   * the outward apocenter search is capped at r = 37.5;
//   * any other bracketing/solver failure sets both outputs to the
//     sentinel -9999.99 (consumed by calcJRAdiabatic).
void calcRapRperi(int ndata,
                  double * rperi,
                  double * rap,
                  double * R,
                  double * ER,
                  double * Lz,
                  int nargs,
                  struct potentialArg * actionAngleArgs){
  int ii, tid, nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  double peps, meps;
  //Per-thread function objects and parameter packs
  gsl_function * JRRoot= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct JRAdiabaticArg * params= (struct JRAdiabaticArg *) malloc ( nthreads * sizeof (struct JRAdiabaticArg) );
  //Setup solver (one Brent solver per thread)
  int status;
  int iter, max_iter = 100;
  const gsl_root_fsolver_type *T;
  double R_lo, R_hi;
  struct pragmasolver *s= (struct pragmasolver *) malloc ( nthreads * sizeof (struct pragmasolver) );
  T = gsl_root_fsolver_brent;
  for (tid=0; tid < nthreads; tid++){
    (params+tid)->nargs= nargs;
    (params+tid)->actionAngleArgs= actionAngleArgs;
    (s+tid)->s= gsl_root_fsolver_alloc (T);
  }
  UNUSED int chunk= CHUNKSIZE;
  //Handle GSL errors (e.g. invalid brackets) via status codes, not abort
  gsl_set_error_handler_off();
#pragma omp parallel for schedule(static,chunk) \
  private(tid,ii,iter,status,R_lo,R_hi,meps,peps) \
  shared(rperi,rap,JRRoot,params,s,R,ER,Lz,max_iter)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    //Setup function for this orbit
    (params+tid)->ER= *(ER+ii);
    (params+tid)->Lz22= 0.5 * *(Lz+ii) * *(Lz+ii);
    (JRRoot+tid)->params = params+tid;
    (JRRoot+tid)->function = &JRAdiabaticIntegrandSquared;
    //If f(R) ~ 0, the current radius is itself a turning point; probe
    //f just above and below R to decide which one.
    //NOTE(review): if both probes have the same sign without both being
    //tiny, none of the three branches below runs and rperi/rap keep the
    //uninitialized malloc'd contents — confirm this case cannot occur.
    if ( fabs(GSL_FN_EVAL(JRRoot+tid,*(R+ii))) < 0.0000001){ //we are at rap or rperi
      peps= GSL_FN_EVAL(JRRoot+tid,*(R+ii)+0.0000001);
      meps= GSL_FN_EVAL(JRRoot+tid,*(R+ii)-0.0000001);
      if ( fabs(peps) < 0.00000001 && fabs(meps) < 0.00000001 && peps*meps >= 0.) {//circular
        *(rperi+ii) = *(R+ii);
        *(rap+ii) = *(R+ii);
      }
      else if ( peps < 0. && meps > 0. ) {//umax: R is the apocenter
        *(rap+ii)= *(R+ii);
        //Bracket the pericenter by stepping inward until f < 0
        R_lo= 0.9 * (*(R+ii) - 0.0000001);
        R_hi= *(R+ii) - 0.00000001;
        while ( GSL_FN_EVAL(JRRoot+tid,R_lo) >= 0. && R_lo > 0.000000001){
          R_hi= R_lo; //this makes sure that brent evaluates using previous
          R_lo*= 0.9;
        }
        //Find root
        status = gsl_root_fsolver_set ((s+tid)->s, JRRoot+tid, R_lo, R_hi);
        if (status == GSL_EINVAL) {
          *(rperi+ii) = 0.;//Assume zero if below 0.000000001
          continue;
        }
        iter= 0;
        do
          {
            iter++;
            status = gsl_root_fsolver_iterate ((s+tid)->s);
            R_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
            R_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
            status = gsl_root_test_interval (R_lo, R_hi,
                                             9.9999999999999998e-13,
                                             4.4408920985006262e-16);
          }
        while (status == GSL_CONTINUE && iter < max_iter);
        // LCOV_EXCL_START
        if (status == GSL_EINVAL) {//Shouldn't ever get here
          *(rperi+ii) = -9999.99;
          *(rap+ii) = -9999.99;
          continue;
        }
        // LCOV_EXCL_STOP
        *(rperi+ii) = gsl_root_fsolver_root ((s+tid)->s);
      }
      else if ( peps > 0. && meps < 0. ){//umin: R is the pericenter
        *(rperi+ii)= *(R+ii);
        //Bracket the apocenter by stepping outward until f < 0
        R_lo= *(R+ii) + 0.0000001;
        R_hi= 1.1 * (*(R+ii) + 0.0000001);
        while ( GSL_FN_EVAL(JRRoot+tid,R_hi) >= 0. && R_hi < 37.5) {
          R_lo= R_hi; //this makes sure that brent evaluates using previous
          R_hi*= 1.1;
        }
        //Find root
        status = gsl_root_fsolver_set ((s+tid)->s, JRRoot+tid, R_lo, R_hi);
        if (status == GSL_EINVAL) {
          *(rperi+ii) = -9999.99;
          *(rap+ii) = -9999.99;
          continue;
        }
        iter= 0;
        do
          {
            iter++;
            status = gsl_root_fsolver_iterate ((s+tid)->s);
            R_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
            R_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
            status = gsl_root_test_interval (R_lo, R_hi,
                                             9.9999999999999998e-13,
                                             4.4408920985006262e-16);
          }
        while (status == GSL_CONTINUE && iter < max_iter);
        // LCOV_EXCL_START
        if (status == GSL_EINVAL) {//Shouldn't ever get here
          *(rperi+ii) = -9999.99;
          *(rap+ii) = -9999.99;
          continue;
        }
        // LCOV_EXCL_STOP
        *(rap+ii) = gsl_root_fsolver_root ((s+tid)->s);
      }
    }
    else {
      //General case: R lies strictly between the turning points.
      //First bracket the pericenter by stepping inward until f < 0
      R_lo= 0.9 * *(R+ii);
      R_hi= *(R+ii);
      while ( GSL_FN_EVAL(JRRoot+tid,R_lo) >= 0. && R_lo > 0.000000001){
        R_hi= R_lo; //this makes sure that brent evaluates using previous
        R_lo*= 0.9;
      }
      //Tighten the upper end of the bracket if we stepped more than once
      R_hi= (R_lo < 0.9 * *(R+ii)) ? R_lo / 0.9 / 0.9: *(R+ii);
      //Find root
      status = gsl_root_fsolver_set ((s+tid)->s, JRRoot+tid, R_lo, R_hi);
      if (status == GSL_EINVAL) {
        *(rperi+ii) = 0.;//Assume zero if below 0.000000001
      } else {
        iter= 0;
        do
          {
            iter++;
            status = gsl_root_fsolver_iterate ((s+tid)->s);
            R_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
            R_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
            status = gsl_root_test_interval (R_lo, R_hi,
                                             9.9999999999999998e-13,
                                             4.4408920985006262e-16);
          }
        while (status == GSL_CONTINUE && iter < max_iter);
        // LCOV_EXCL_START
        if (status == GSL_EINVAL) {//Shouldn't ever get here
          *(rperi+ii) = -9999.99;
          *(rap+ii) = -9999.99;
          continue;
        }
        // LCOV_EXCL_STOP
        *(rperi+ii) = gsl_root_fsolver_root ((s+tid)->s);
      }
      //Now bracket the apocenter by stepping outward until f <= 0
      R_lo= *(R+ii);
      R_hi= 1.1 * *(R+ii);
      while ( GSL_FN_EVAL(JRRoot+tid,R_hi) > 0. && R_hi < 37.5) {
        R_lo= R_hi; //this makes sure that brent evaluates using previous
        R_hi*= 1.1;
      }
      //Tighten the lower end of the bracket if we stepped more than once
      R_lo= (R_hi > 1.1 * *(R+ii)) ? R_hi / 1.1 / 1.1: *(R+ii);
      //Find root
      status = gsl_root_fsolver_set ((s+tid)->s, JRRoot+tid, R_lo, R_hi);
      if (status == GSL_EINVAL) {
        *(rperi+ii) = -9999.99;
        *(rap+ii) = -9999.99;
        continue;
      }
      iter= 0;
      do
        {
          iter++;
          status = gsl_root_fsolver_iterate ((s+tid)->s);
          R_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
          R_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
          status = gsl_root_test_interval (R_lo, R_hi,
                                           9.9999999999999998e-13,
                                           4.4408920985006262e-16);
        }
      while (status == GSL_CONTINUE && iter < max_iter);
      // LCOV_EXCL_START
      if (status == GSL_EINVAL) {//Shouldn't ever get here
        *(rperi+ii) = -9999.99;
        *(rap+ii) = -9999.99;
        continue;
      }
      // LCOV_EXCL_STOP
      *(rap+ii) = gsl_root_fsolver_root ((s+tid)->s);
    }
  }
  //Restore the default GSL error handler
  gsl_set_error_handler (NULL);
  for (tid=0; tid < nthreads; tid++)
    gsl_root_fsolver_free( (s+tid)->s);
  free(s);
  free(JRRoot);
  free(params);
}
// Find, for each point, the maximum vertical height zmax as the root of
//   g(z) = Ez - [Phi(R,z) - Phi(R,0)]   (= JzAdiabaticIntegrandSquared)
// using Brent's method with one solver per OpenMP thread.
// If |z| is already (numerically) a root, zmax = |z|; otherwise the root
// is bracketed outward from |z| (starting at 0.1 when z == 0), with the
// search capped at z = 37.5. Bracketing/solver failure sets the
// sentinel zmax = -9999.99 (consumed by calcJzAdiabatic).
void calcZmax(int ndata,
              double * zmax,
              double * z,
              double * R,
              double * Ez,
              int nargs,
              struct potentialArg * actionAngleArgs){
  int ii, tid, nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  //Per-thread function objects and parameter packs
  gsl_function * JzRoot= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct JzAdiabaticArg * params= (struct JzAdiabaticArg *) malloc ( nthreads * sizeof (struct JzAdiabaticArg) );
  //Setup solver (one Brent solver per thread)
  int status;
  int iter, max_iter = 100;
  const gsl_root_fsolver_type *T;
  double z_lo, z_hi;
  struct pragmasolver *s= (struct pragmasolver *) malloc ( nthreads * sizeof (struct pragmasolver) );
  T = gsl_root_fsolver_brent;
  for (tid=0; tid < nthreads; tid++){
    (params+tid)->nargs= nargs;
    (params+tid)->actionAngleArgs= actionAngleArgs;
    (s+tid)->s= gsl_root_fsolver_alloc (T);
  }
  UNUSED int chunk= CHUNKSIZE;
  //Handle GSL errors (e.g. invalid brackets) via status codes, not abort
  gsl_set_error_handler_off();
#pragma omp parallel for schedule(static,chunk) \
  private(tid,ii,iter,status,z_lo,z_hi) \
  shared(zmax,JzRoot,params,s,z,Ez,R,max_iter)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    //Setup function for this orbit
    (params+tid)->Ez= *(Ez+ii);
    (params+tid)->R= *(R+ii);
    (JzRoot+tid)->function = &JzAdiabaticIntegrandSquared;
    (JzRoot+tid)->params = params+tid;
    //If the current height is already (numerically) a root, it is zmax
    if ( fabs(GSL_FN_EVAL(JzRoot+tid,*(z+ii))) < 0.0000001){ //we are at zmax
      *(zmax+ii)= fabs( *(z+ii) );
    }
    else {
      //Bracket the root by stepping outward from |z| until g < 0
      z_lo= fabs ( *(z+ii) );
      z_hi= ( *(z+ii) == 0. ) ? 0.1: 1.1 * fabs( *(z+ii) );
      while ( GSL_FN_EVAL(JzRoot+tid,z_hi) >= 0. && z_hi < 37.5) {
        z_lo= z_hi; //this makes sure that brent evaluates using previous
        z_hi*= 1.1;
      }
      //Find root
      status = gsl_root_fsolver_set ((s+tid)->s, JzRoot+tid, z_lo, z_hi);
      if (status == GSL_EINVAL) {
        *(zmax+ii) = -9999.99;
        continue;
      }
      iter= 0;
      do
        {
          iter++;
          status = gsl_root_fsolver_iterate ((s+tid)->s);
          z_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
          z_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
          status = gsl_root_test_interval (z_lo, z_hi,
                                           9.9999999999999998e-13,
                                           4.4408920985006262e-16);
        }
      while (status == GSL_CONTINUE && iter < max_iter);
      // LCOV_EXCL_START
      if (status == GSL_EINVAL) {//Shouldn't ever get here
        *(zmax+ii) = -9999.99;
        continue;
      }
      // LCOV_EXCL_STOP
      *(zmax+ii) = gsl_root_fsolver_root ((s+tid)->s);
    }
  }
  //Restore the default GSL error handler
  gsl_set_error_handler (NULL);
  for (tid=0; tid < nthreads; tid++)
    gsl_root_fsolver_free( (s+tid)->s);
  free(s);
  free(JzRoot);
  free(params);
}
// Radial integrand: the square root of JRAdiabaticIntegrandSquared,
// i.e. the radial speed up to the sqrt(2) factor applied by the caller.
// p points to a struct JRAdiabaticArg.
double JRAdiabaticIntegrand(double R,
                            void * p){
  double vR2= JRAdiabaticIntegrandSquared(R,p);
  return sqrt(vR2);
}
// Squared radial integrand: ER - Phi(R,0) - Lz^2/(2 R^2); positive
// between pericenter and apocenter, zero at the turning points.
// p points to a struct JRAdiabaticArg.
double JRAdiabaticIntegrandSquared(double R,
                                   void * p){
  struct JRAdiabaticArg * args= (struct JRAdiabaticArg *) p;
  double planarPot= evaluatePotentials(R,0.,args->nargs,args->actionAngleArgs);
  double centrifugal= args->Lz22 / R / R;
  return args->ER - planarPot - centrifugal;
}
// Vertical integrand: the square root of JzAdiabaticIntegrandSquared,
// i.e. the vertical speed up to the sqrt(2) factor applied by the caller.
// p points to a struct JzAdiabaticArg.
double JzAdiabaticIntegrand(double z,
                            void * p){
  double vz2= JzAdiabaticIntegrandSquared(z,p);
  return sqrt(vz2);
}
// Squared vertical integrand: Ez minus the vertical potential
// Phi(R,z) - Phi(R,0); positive below zmax, zero at zmax.
// p points to a struct JzAdiabaticArg.
double JzAdiabaticIntegrandSquared(double z,
                                   void * p){
  struct JzAdiabaticArg * args= (struct JzAdiabaticArg *) p;
  double vertPot= evaluateVerticalPotentials(args->R,z,
                                             args->nargs,
                                             args->actionAngleArgs);
  return args->Ez - vertPot;
}
// Vertical potential at (R,z): the full potential minus its midplane
// value at the same radius, Phi(R,z) - Phi(R,0).
double evaluateVerticalPotentials(double R, double z,
                                  int nargs,
                                  struct potentialArg * actionAngleArgs){
  double potAtZ= evaluatePotentials(R,z,nargs,actionAngleArgs);
  double potMidplane= evaluatePotentials(R,0.,nargs,actionAngleArgs);
  return potAtZ - potMidplane;
}
|
convolution_1x1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void conv1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#if __ARM_NEON && __aarch64__
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
Mat out2 = top_blob.channel(p+2);
Mat out3 = top_blob.channel(p+3);
Mat out4 = top_blob.channel(p+4);
Mat out5 = top_blob.channel(p+5);
Mat out6 = top_blob.channel(p+6);
Mat out7 = top_blob.channel(p+7);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
const float bias2 = bias ? bias[p+2] : 0.f;
const float bias3 = bias ? bias[p+3] : 0.f;
const float bias4 = bias ? bias[p+4] : 0.f;
const float bias5 = bias ? bias[p+5] : 0.f;
const float bias6 = bias ? bias[p+6] : 0.f;
const float bias7 = bias ? bias[p+7] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
out2.fill(bias2);
out3.fill(bias3);
out4.fill(bias4);
out5.fill(bias5);
out6.fill(bias6);
out7.fill(bias7);
int q = 0;
for (; q+7<inch; q+=8)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr2 = out2;
float* outptr3 = out3;
float* outptr4 = out4;
float* outptr5 = out5;
float* outptr6 = out6;
float* outptr7 = out7;
const float* img0 = bottom_blob.channel(q);
const float* img1 = bottom_blob.channel(q+1);
const float* img2 = bottom_blob.channel(q+2);
const float* img3 = bottom_blob.channel(q+3);
const float* img4 = bottom_blob.channel(q+4);
const float* img5 = bottom_blob.channel(q+5);
const float* img6 = bottom_blob.channel(q+6);
const float* img7 = bottom_blob.channel(q+7);
const float* kernel0 = kernel + p*inch + q;
const float* kernel1 = kernel + (p+1)*inch + q;
const float* kernel2 = kernel + (p+2)*inch + q;
const float* kernel3 = kernel + (p+3)*inch + q;
const float* kernel4 = kernel + (p+4)*inch + q;
const float* kernel5 = kernel + (p+5)*inch + q;
const float* kernel6 = kernel + (p+6)*inch + q;
const float* kernel7 = kernel + (p+7)*inch + q;
const float* r0 = img0;
const float* r1 = img1;
const float* r2 = img2;
const float* r3 = img3;
const float* r4 = img4;
const float* r5 = img5;
const float* r6 = img6;
const float* r7 = img7;
int size = outw * outh;
int nn = size >> 2;
int remain = size & 3;
float32x4_t _k0 = vld1q_f32(kernel0);
float32x4_t _k1 = vld1q_f32(kernel1);
float32x4_t _k2 = vld1q_f32(kernel2);
float32x4_t _k3 = vld1q_f32(kernel3);
float32x4_t _k4 = vld1q_f32(kernel4);
float32x4_t _k5 = vld1q_f32(kernel5);
float32x4_t _k6 = vld1q_f32(kernel6);
float32x4_t _k7 = vld1q_f32(kernel7);
float32x4_t _k0n = vld1q_f32(kernel0+4);
float32x4_t _k1n = vld1q_f32(kernel1+4);
float32x4_t _k2n = vld1q_f32(kernel2+4);
float32x4_t _k3n = vld1q_f32(kernel3+4);
float32x4_t _k4n = vld1q_f32(kernel4+4);
float32x4_t _k5n = vld1q_f32(kernel5+4);
float32x4_t _k6n = vld1q_f32(kernel6+4);
float32x4_t _k7n = vld1q_f32(kernel7+4);
#ifdef __clang__
// gcc reject over 30 oprands :(
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v17.4s}, [%9], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v18.4s}, [%1] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v19.4s}, [%2] \n"
"0: \n"
"fmla v18.4s, v17.4s, %34.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v20.4s}, [%3] \n"
"fmla v19.4s, v17.4s, %35.s[0] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v21.4s}, [%4] \n"
"fmla v20.4s, v17.4s, %36.s[0] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v22.4s}, [%5] \n"
"fmla v21.4s, v17.4s, %37.s[0] \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v23.4s}, [%6] \n"
"fmla v22.4s, v17.4s, %38.s[0] \n"
"prfm pldl1keep, [%10, #128] \n"
"ld1 {v16.4s}, [%10], #16 \n"
"fmla v23.4s, v17.4s, %39.s[0] \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v24.4s}, [%7] \n"
"fmla v18.4s, v16.4s, %34.s[1] \n"
"fmla v19.4s, v16.4s, %35.s[1] \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v25.4s}, [%8] \n"
"fmla v24.4s, v17.4s, %40.s[0] \n"
"fmla v25.4s, v17.4s, %41.s[0] \n"
"fmla v20.4s, v16.4s, %36.s[1] \n"
"fmla v21.4s, v16.4s, %37.s[1] \n"
"prfm pldl1keep, [%11, #128] \n"
"ld1 {v17.4s}, [%11], #16 \n"
"fmla v22.4s, v16.4s, %38.s[1] \n"
"fmla v23.4s, v16.4s, %39.s[1] \n"
"fmla v18.4s, v17.4s, %34.s[2] \n"
"fmla v19.4s, v17.4s, %35.s[2] \n"
"fmla v24.4s, v16.4s, %40.s[1] \n"
"fmla v25.4s, v16.4s, %41.s[1] \n"
"fmla v20.4s, v17.4s, %36.s[2] \n"
"fmla v21.4s, v17.4s, %37.s[2] \n"
"prfm pldl1keep, [%12, #128] \n"
"ld1 {v16.4s}, [%12], #16 \n"
"fmla v22.4s, v17.4s, %38.s[2] \n"
"fmla v23.4s, v17.4s, %39.s[2] \n"
"fmla v18.4s, v16.4s, %34.s[3] \n"
"fmla v19.4s, v16.4s, %35.s[3] \n"
"fmla v24.4s, v17.4s, %40.s[2] \n"
"fmla v25.4s, v17.4s, %41.s[2] \n"
"fmla v20.4s, v16.4s, %36.s[3] \n"
"fmla v21.4s, v16.4s, %37.s[3] \n"
"prfm pldl1keep, [%13, #128] \n"
"ld1 {v17.4s}, [%13], #16 \n"
"fmla v22.4s, v16.4s, %38.s[3] \n"
"fmla v23.4s, v16.4s, %39.s[3] \n"
"fmla v18.4s, v17.4s, %42.s[0] \n"
"fmla v19.4s, v17.4s, %43.s[0] \n"
"fmla v24.4s, v16.4s, %40.s[3] \n"
"fmla v25.4s, v16.4s, %41.s[3] \n"
"fmla v20.4s, v17.4s, %44.s[0] \n"
"fmla v21.4s, v17.4s, %45.s[0] \n"
"prfm pldl1keep, [%14, #128] \n"
"ld1 {v16.4s}, [%14], #16 \n"
"fmla v22.4s, v17.4s, %46.s[0] \n"
"fmla v23.4s, v17.4s, %47.s[0] \n"
"fmla v18.4s, v16.4s, %42.s[1] \n"
"fmla v19.4s, v16.4s, %43.s[1] \n"
"fmla v24.4s, v17.4s, %48.s[0] \n"
"fmla v25.4s, v17.4s, %49.s[0] \n"
"fmla v20.4s, v16.4s, %44.s[1] \n"
"fmla v21.4s, v16.4s, %45.s[1] \n"
"prfm pldl1keep, [%15, #128] \n"
"ld1 {v17.4s}, [%15], #16 \n"
"fmla v22.4s, v16.4s, %46.s[1] \n"
"fmla v23.4s, v16.4s, %47.s[1] \n"
"fmla v18.4s, v17.4s, %42.s[2] \n"
"fmla v19.4s, v17.4s, %43.s[2] \n"
"fmla v24.4s, v16.4s, %48.s[1] \n"
"fmla v25.4s, v16.4s, %49.s[1] \n"
"fmla v20.4s, v17.4s, %44.s[2] \n"
"fmla v21.4s, v17.4s, %45.s[2] \n"
"prfm pldl1keep, [%16, #128] \n"
"ld1 {v16.4s}, [%16], #16 \n"
"fmla v22.4s, v17.4s, %46.s[2] \n"
"fmla v23.4s, v17.4s, %47.s[2] \n"
"fmla v18.4s, v16.4s, %42.s[3] \n"
"fmla v19.4s, v16.4s, %43.s[3] \n"
"fmla v24.4s, v17.4s, %48.s[2] \n"
"fmla v25.4s, v17.4s, %49.s[2] \n"
"fmla v20.4s, v16.4s, %44.s[3] \n"
"fmla v21.4s, v16.4s, %45.s[3] \n"
"st1 {v18.4s}, [%1], #16 \n"
"fmla v22.4s, v16.4s, %46.s[3] \n"
"st1 {v19.4s}, [%2], #16 \n"
"fmla v23.4s, v16.4s, %47.s[3] \n"
"st1 {v20.4s}, [%3], #16 \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v17.4s}, [%9], #16 \n"
"fmla v24.4s, v16.4s, %48.s[3] \n"
"st1 {v21.4s}, [%4], #16 \n"
"fmla v25.4s, v16.4s, %49.s[3] \n"
"st1 {v22.4s}, [%5], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v18.4s}, [%1] \n"
"st1 {v23.4s}, [%6], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v19.4s}, [%2] \n"
"st1 {v24.4s}, [%7], #16 \n"
"subs %w0, %w0, #1 \n"
"st1 {v25.4s}, [%8], #16 \n"
"bne 0b \n"
"sub %9, %9, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(outptr4),// %5
"=r"(outptr5),// %6
"=r"(outptr6),// %7
"=r"(outptr7),// %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(r3), // %12
"=r"(r4), // %13
"=r"(r5), // %14
"=r"(r6), // %15
"=r"(r7) // %16
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(r3),
"13"(r4),
"14"(r5),
"15"(r6),
"16"(r7),
"w"(_k0), // %34
"w"(_k1), // %35
"w"(_k2), // %36
"w"(_k3), // %37
"w"(_k4), // %38
"w"(_k5), // %39
"w"(_k6), // %40
"w"(_k7), // %41
"w"(_k0n), // %42
"w"(_k1n), // %43
"w"(_k2n), // %44
"w"(_k3n), // %45
"w"(_k4n), // %46
"w"(_k5n), // %47
"w"(_k6n), // %48
"w"(_k7n) // %49
: "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"//, "v26", "v27", "v28", "v29", "v30", "v31"
);
}
#else
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(r0);
float32x4_t _out0p = vld1q_f32(outptr0);
float32x4_t _out1p = vld1q_f32(outptr1);
float32x4_t _out2p = vld1q_f32(outptr2);
float32x4_t _out3p = vld1q_f32(outptr3);
float32x4_t _out4p = vld1q_f32(outptr4);
float32x4_t _out5p = vld1q_f32(outptr5);
float32x4_t _out6p = vld1q_f32(outptr6);
float32x4_t _out7p = vld1q_f32(outptr7);
_out0p = vfmaq_laneq_f32(_out0p, _p, _k0, 0);
_out1p = vfmaq_laneq_f32(_out1p, _p, _k1, 0);
_out2p = vfmaq_laneq_f32(_out2p, _p, _k2, 0);
_out3p = vfmaq_laneq_f32(_out3p, _p, _k3, 0);
_out4p = vfmaq_laneq_f32(_out4p, _p, _k4, 0);
_out5p = vfmaq_laneq_f32(_out5p, _p, _k5, 0);
_out6p = vfmaq_laneq_f32(_out6p, _p, _k6, 0);
_out7p = vfmaq_laneq_f32(_out7p, _p, _k7, 0);
float32x4_t _p1 = vld1q_f32(r1);
_out0p = vfmaq_laneq_f32(_out0p, _p1, _k0, 1);
_out1p = vfmaq_laneq_f32(_out1p, _p1, _k1, 1);
_out2p = vfmaq_laneq_f32(_out2p, _p1, _k2, 1);
_out3p = vfmaq_laneq_f32(_out3p, _p1, _k3, 1);
_out4p = vfmaq_laneq_f32(_out4p, _p1, _k4, 1);
_out5p = vfmaq_laneq_f32(_out5p, _p1, _k5, 1);
_out6p = vfmaq_laneq_f32(_out6p, _p1, _k6, 1);
_out7p = vfmaq_laneq_f32(_out7p, _p1, _k7, 1);
float32x4_t _p2 = vld1q_f32(r2);
_out0p = vfmaq_laneq_f32(_out0p, _p2, _k0, 2);
_out1p = vfmaq_laneq_f32(_out1p, _p2, _k1, 2);
_out2p = vfmaq_laneq_f32(_out2p, _p2, _k2, 2);
_out3p = vfmaq_laneq_f32(_out3p, _p2, _k3, 2);
_out4p = vfmaq_laneq_f32(_out4p, _p2, _k4, 2);
_out5p = vfmaq_laneq_f32(_out5p, _p2, _k5, 2);
_out6p = vfmaq_laneq_f32(_out6p, _p2, _k6, 2);
_out7p = vfmaq_laneq_f32(_out7p, _p2, _k7, 2);
float32x4_t _p3 = vld1q_f32(r3);
_out0p = vfmaq_laneq_f32(_out0p, _p3, _k0, 3);
_out1p = vfmaq_laneq_f32(_out1p, _p3, _k1, 3);
_out2p = vfmaq_laneq_f32(_out2p, _p3, _k2, 3);
_out3p = vfmaq_laneq_f32(_out3p, _p3, _k3, 3);
_out4p = vfmaq_laneq_f32(_out4p, _p3, _k4, 3);
_out5p = vfmaq_laneq_f32(_out5p, _p3, _k5, 3);
_out6p = vfmaq_laneq_f32(_out6p, _p3, _k6, 3);
_out7p = vfmaq_laneq_f32(_out7p, _p3, _k7, 3);
float32x4_t _p4 = vld1q_f32(r4);
_out0p = vfmaq_laneq_f32(_out0p, _p4, _k0n, 0);
_out1p = vfmaq_laneq_f32(_out1p, _p4, _k1n, 0);
_out2p = vfmaq_laneq_f32(_out2p, _p4, _k2n, 0);
_out3p = vfmaq_laneq_f32(_out3p, _p4, _k3n, 0);
_out4p = vfmaq_laneq_f32(_out4p, _p4, _k4n, 0);
_out5p = vfmaq_laneq_f32(_out5p, _p4, _k5n, 0);
_out6p = vfmaq_laneq_f32(_out6p, _p4, _k6n, 0);
_out7p = vfmaq_laneq_f32(_out7p, _p4, _k7n, 0);
float32x4_t _p5 = vld1q_f32(r5);
_out0p = vfmaq_laneq_f32(_out0p, _p5, _k0n, 1);
_out1p = vfmaq_laneq_f32(_out1p, _p5, _k1n, 1);
_out2p = vfmaq_laneq_f32(_out2p, _p5, _k2n, 1);
_out3p = vfmaq_laneq_f32(_out3p, _p5, _k3n, 1);
_out4p = vfmaq_laneq_f32(_out4p, _p5, _k4n, 1);
_out5p = vfmaq_laneq_f32(_out5p, _p5, _k5n, 1);
_out6p = vfmaq_laneq_f32(_out6p, _p5, _k6n, 1);
_out7p = vfmaq_laneq_f32(_out7p, _p5, _k7n, 1);
float32x4_t _p6 = vld1q_f32(r6);
_out0p = vfmaq_laneq_f32(_out0p, _p6, _k0n, 2);
_out1p = vfmaq_laneq_f32(_out1p, _p6, _k1n, 2);
_out2p = vfmaq_laneq_f32(_out2p, _p6, _k2n, 2);
_out3p = vfmaq_laneq_f32(_out3p, _p6, _k3n, 2);
_out4p = vfmaq_laneq_f32(_out4p, _p6, _k4n, 2);
_out5p = vfmaq_laneq_f32(_out5p, _p6, _k5n, 2);
_out6p = vfmaq_laneq_f32(_out6p, _p6, _k6n, 2);
_out7p = vfmaq_laneq_f32(_out7p, _p6, _k7n, 2);
float32x4_t _p7 = vld1q_f32(r7);
_out0p = vfmaq_laneq_f32(_out0p, _p7, _k0n, 3);
_out1p = vfmaq_laneq_f32(_out1p, _p7, _k1n, 3);
_out2p = vfmaq_laneq_f32(_out2p, _p7, _k2n, 3);
_out3p = vfmaq_laneq_f32(_out3p, _p7, _k3n, 3);
_out4p = vfmaq_laneq_f32(_out4p, _p7, _k4n, 3);
_out5p = vfmaq_laneq_f32(_out5p, _p7, _k5n, 3);
_out6p = vfmaq_laneq_f32(_out6p, _p7, _k6n, 3);
_out7p = vfmaq_laneq_f32(_out7p, _p7, _k7n, 3);
vst1q_f32(outptr0, _out0p);
vst1q_f32(outptr1, _out1p);
vst1q_f32(outptr2, _out2p);
vst1q_f32(outptr3, _out3p);
vst1q_f32(outptr4, _out4p);
vst1q_f32(outptr5, _out5p);
vst1q_f32(outptr6, _out6p);
vst1q_f32(outptr7, _out7p);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
r6 += 4;
r7 += 4;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
outptr4 += 4;
outptr5 += 4;
outptr6 += 4;
outptr7 += 4;
}
#endif
for (; remain>0; remain--)
{
// TODO neon optimize
float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3] + *r4 * kernel0[4] + *r5 * kernel0[5] + *r6 * kernel0[6] + *r7 * kernel0[7];
float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3] + *r4 * kernel1[4] + *r5 * kernel1[5] + *r6 * kernel1[6] + *r7 * kernel1[7];
float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3] + *r4 * kernel2[4] + *r5 * kernel2[5] + *r6 * kernel2[6] + *r7 * kernel2[7];
float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3] + *r4 * kernel3[4] + *r5 * kernel3[5] + *r6 * kernel3[6] + *r7 * kernel3[7];
float sum4 = *r0 * kernel4[0] + *r1 * kernel4[1] + *r2 * kernel4[2] + *r3 * kernel4[3] + *r4 * kernel4[4] + *r5 * kernel4[5] + *r6 * kernel4[6] + *r7 * kernel4[7];
float sum5 = *r0 * kernel5[0] + *r1 * kernel5[1] + *r2 * kernel5[2] + *r3 * kernel5[3] + *r4 * kernel5[4] + *r5 * kernel5[5] + *r6 * kernel5[6] + *r7 * kernel5[7];
float sum6 = *r0 * kernel6[0] + *r1 * kernel6[1] + *r2 * kernel6[2] + *r3 * kernel6[3] + *r4 * kernel6[4] + *r5 * kernel6[5] + *r6 * kernel6[6] + *r7 * kernel6[7];
float sum7 = *r0 * kernel7[0] + *r1 * kernel7[1] + *r2 * kernel7[2] + *r3 * kernel7[3] + *r4 * kernel7[4] + *r5 * kernel7[5] + *r6 * kernel7[6] + *r7 * kernel7[7];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
r7++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
}
}
for (; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr2 = out2;
float* outptr3 = out3;
float* outptr4 = out4;
float* outptr5 = out5;
float* outptr6 = out6;
float* outptr7 = out7;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch + q;
const float* kernel1 = kernel + (p+1)*inch + q;
const float* kernel2 = kernel + (p+2)*inch + q;
const float* kernel3 = kernel + (p+3)*inch + q;
const float* kernel4 = kernel + (p+4)*inch + q;
const float* kernel5 = kernel + (p+5)*inch + q;
const float* kernel6 = kernel + (p+6)*inch + q;
const float* kernel7 = kernel + (p+7)*inch + q;
const float k0 = kernel0[0];
const float k1 = kernel1[0];
const float k2 = kernel2[0];
const float k3 = kernel3[0];
const float k4 = kernel4[0];
const float k5 = kernel5[0];
const float k6 = kernel6[0];
const float k7 = kernel7[0];
const float* r0 = img0;
int size = outw * outh;
int nn = size >> 2;
int remain = size & 3;
float32x4_t _k0 = vdupq_n_f32(k0);
float32x4_t _k1 = vdupq_n_f32(k1);
float32x4_t _k2 = vdupq_n_f32(k2);
float32x4_t _k3 = vdupq_n_f32(k3);
float32x4_t _k4 = vdupq_n_f32(k4);
float32x4_t _k5 = vdupq_n_f32(k5);
float32x4_t _k6 = vdupq_n_f32(k6);
float32x4_t _k7 = vdupq_n_f32(k7);
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(r0);
float32x4_t _out0p = vld1q_f32(outptr0);
float32x4_t _out1p = vld1q_f32(outptr1);
float32x4_t _out2p = vld1q_f32(outptr2);
float32x4_t _out3p = vld1q_f32(outptr3);
float32x4_t _out4p = vld1q_f32(outptr4);
float32x4_t _out5p = vld1q_f32(outptr5);
float32x4_t _out6p = vld1q_f32(outptr6);
float32x4_t _out7p = vld1q_f32(outptr7);
_out0p = vfmaq_f32(_out0p, _p, _k0);
_out1p = vfmaq_f32(_out1p, _p, _k1);
_out2p = vfmaq_f32(_out2p, _p, _k2);
_out3p = vfmaq_f32(_out3p, _p, _k3);
_out4p = vfmaq_f32(_out4p, _p, _k4);
_out5p = vfmaq_f32(_out5p, _p, _k5);
_out6p = vfmaq_f32(_out6p, _p, _k6);
_out7p = vfmaq_f32(_out7p, _p, _k7);
vst1q_f32(outptr0, _out0p);
vst1q_f32(outptr1, _out1p);
vst1q_f32(outptr2, _out2p);
vst1q_f32(outptr3, _out3p);
vst1q_f32(outptr4, _out4p);
vst1q_f32(outptr5, _out5p);
vst1q_f32(outptr6, _out6p);
vst1q_f32(outptr7, _out7p);
r0 += 4;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
outptr4 += 4;
outptr5 += 4;
outptr6 += 4;
outptr7 += 4;
}
for (; remain>0; remain--)
{
// TODO neon optimize
float sum0 = *r0 * k0;
float sum1 = *r0 * k1;
float sum2 = *r0 * k2;
float sum3 = *r0 * k3;
float sum4 = *r0 * k4;
float sum5 = *r0 * k5;
float sum6 = *r0 * k6;
float sum7 = *r0 * k7;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
r0++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
}
}
}
nn_outch = (outch - remain_outch_start) >> 2;
remain_outch_start += nn_outch << 2;
#else // __ARM_NEON && __aarch64__
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
Mat out2 = top_blob.channel(p+2);
Mat out3 = top_blob.channel(p+3);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
const float bias2 = bias ? bias[p+2] : 0.f;
const float bias3 = bias ? bias[p+3] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
out2.fill(bias2);
out3.fill(bias3);
int q = 0;
for (; q+3<inch; q+=4)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr2 = out2;
float* outptr3 = out3;
const float* img0 = bottom_blob.channel(q);
const float* img1 = bottom_blob.channel(q+1);
const float* img2 = bottom_blob.channel(q+2);
const float* img3 = bottom_blob.channel(q+3);
const float* kernel0 = kernel + p*inch + q;
const float* kernel1 = kernel + (p+1)*inch + q;
const float* kernel2 = kernel + (p+2)*inch + q;
const float* kernel3 = kernel + (p+3)*inch + q;
const float* r0 = img0;
const float* r1 = img1;
const float* r2 = img2;
const float* r3 = img3;
int size = outw * outh;
#if __ARM_NEON
int nn = size >> 3;
int remain = size & 7;
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vld1q_f32(kernel0);
float32x4_t _k1 = vld1q_f32(kernel1);
float32x4_t _k2 = vld1q_f32(kernel2);
float32x4_t _k3 = vld1q_f32(kernel3);
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v6.4s, v7.4s}, [%5], #32 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v8.4s, v9.4s}, [%1] \n"
"0: \n"
"fmla v8.4s, v6.4s, %18.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v10.4s, v11.4s}, [%2] \n"
"fmla v9.4s, v7.4s, %18.s[0] \n"
"fmla v10.4s, v6.4s, %19.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v12.4s, v13.4s}, [%3] \n"
"fmla v11.4s, v7.4s, %19.s[0] \n"
"fmla v12.4s, v6.4s, %20.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v14.4s, v15.4s}, [%4] \n"
"fmla v13.4s, v7.4s, %20.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4s, v5.4s}, [%6], #32 \n"
"fmla v14.4s, v6.4s, %21.s[0] \n"
"fmla v15.4s, v7.4s, %21.s[0] \n"
"fmla v8.4s, v4.4s, %18.s[1] \n"
"fmla v9.4s, v5.4s, %18.s[1] \n"
"fmla v10.4s, v4.4s, %19.s[1] \n"
"fmla v11.4s, v5.4s, %19.s[1] \n"
"fmla v12.4s, v4.4s, %20.s[1] \n"
"fmla v13.4s, v5.4s, %20.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v6.4s, v7.4s}, [%7], #32 \n"
"fmla v14.4s, v4.4s, %21.s[1] \n"
"fmla v15.4s, v5.4s, %21.s[1] \n"
"fmla v8.4s, v6.4s, %18.s[2] \n"
"fmla v9.4s, v7.4s, %18.s[2] \n"
"fmla v10.4s, v6.4s, %19.s[2] \n"
"fmla v11.4s, v7.4s, %19.s[2] \n"
"fmla v12.4s, v6.4s, %20.s[2] \n"
"fmla v13.4s, v7.4s, %20.s[2] \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v4.4s, v5.4s}, [%8], #32 \n"
"fmla v14.4s, v6.4s, %21.s[2] \n"
"fmla v15.4s, v7.4s, %21.s[2] \n"
"fmla v8.4s, v4.4s, %18.s[3] \n"
"fmla v9.4s, v5.4s, %18.s[3] \n"
"fmla v10.4s, v4.4s, %19.s[3] \n"
"fmla v11.4s, v5.4s, %19.s[3] \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"fmla v12.4s, v4.4s, %20.s[3] \n"
"fmla v13.4s, v5.4s, %20.s[3] \n"
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v6.4s, v7.4s}, [%5], #32 \n"
"fmla v14.4s, v4.4s, %21.s[3] \n"
"fmla v15.4s, v5.4s, %21.s[3] \n"
"st1 {v12.4s, v13.4s}, [%3], #32 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v8.4s, v9.4s}, [%1] \n"
"subs %w0, %w0, #1 \n"
"st1 {v14.4s, v15.4s}, [%4], #32 \n"
"bne 0b \n"
"sub %5, %5, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k0), // %18
"w"(_k1), // %19
"w"(_k2), // %20
"w"(_k3) // %21
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%5, #256] \n"
"vld1.f32 {d12-d15}, [%5 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"
"0: \n"
"vmla.f32 q8, q6, %e18[0] \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"
"vmla.f32 q9, q7, %e18[0] \n"
"vmla.f32 q10, q6, %e19[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128] \n"
"vmla.f32 q11, q7, %e19[0] \n"
"vmla.f32 q12, q6, %e20[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128] \n"
"vmla.f32 q13, q7, %e20[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d8-d11}, [%6 :128]! \n"
"vmla.f32 q14, q6, %e21[0] \n"
"vmla.f32 q15, q7, %e21[0] \n"
"vmla.f32 q8, q4, %e18[1] \n"
"vmla.f32 q9, q5, %e18[1] \n"
"vmla.f32 q10, q4, %e19[1] \n"
"vmla.f32 q11, q5, %e19[1] \n"
"vmla.f32 q12, q4, %e20[1] \n"
"vmla.f32 q13, q5, %e20[1] \n"
"pld [%7, #256] \n"
"vld1.f32 {d12-d15}, [%7 :128]! \n"
"vmla.f32 q14, q4, %e21[1] \n"
"vmla.f32 q15, q5, %e21[1] \n"
"vmla.f32 q8, q6, %f18[0] \n"
"vmla.f32 q9, q7, %f18[0] \n"
"vmla.f32 q10, q6, %f19[0] \n"
"vmla.f32 q11, q7, %f19[0] \n"
"vmla.f32 q12, q6, %f20[0] \n"
"vmla.f32 q13, q7, %f20[0] \n"
"pld [%8, #256] \n"
"vld1.f32 {d8-d11}, [%8 :128]! \n"
"vmla.f32 q14, q6, %f21[0] \n"
"vmla.f32 q15, q7, %f21[0] \n"
"vmla.f32 q8, q4, %f18[1] \n"
"vmla.f32 q9, q5, %f18[1] \n"
"vmla.f32 q10, q4, %f19[1] \n"
"vmla.f32 q11, q5, %f19[1] \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"vmla.f32 q12, q4, %f20[1] \n"
"vmla.f32 q13, q5, %f20[1] \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d12-d15}, [%5 :128]! \n"
"vmla.f32 q14, q4, %f21[1] \n"
"vmla.f32 q15, q5, %f21[1] \n"
"vst1.f32 {d24-d27}, [%3 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"
"subs %0, #1 \n"
"vst1.f32 {d28-d31}, [%4 :128]! \n"
"bne 0b \n"
"sub %5, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k0), // %18
"w"(_k1), // %19
"w"(_k2), // %20
"w"(_k3) // %21
: "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
// TODO neon optimize
float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3];
float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3];
float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3];
float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
for (; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr2 = out2;
float* outptr3 = out3;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch + q;
const float* kernel1 = kernel + (p+1)*inch + q;
const float* kernel2 = kernel + (p+2)*inch + q;
const float* kernel3 = kernel + (p+3)*inch + q;
const float k0 = kernel0[0];
const float k1 = kernel1[0];
const float k2 = kernel2[0];
const float k3 = kernel3[0];
const float* r0 = img0;
int size = outw * outh;
#if __ARM_NEON
int nn = size >> 3;
int remain = size & 7;
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(k0);
float32x4_t _k1 = vdupq_n_f32(k1);
float32x4_t _k2 = vdupq_n_f32(k2);
float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(r0);
float32x4_t _pn = vld1q_f32(r0+4);
float32x4_t _out0p = vld1q_f32(outptr0);
float32x4_t _out0pn = vld1q_f32(outptr0+4);
float32x4_t _out1p = vld1q_f32(outptr1);
float32x4_t _out1pn = vld1q_f32(outptr1+4);
float32x4_t _out2p = vld1q_f32(outptr2);
float32x4_t _out2pn = vld1q_f32(outptr2+4);
float32x4_t _out3p = vld1q_f32(outptr3);
float32x4_t _out3pn = vld1q_f32(outptr3+4);
_out0p = vfmaq_f32(_out0p, _p, _k0);
_out0pn = vfmaq_f32(_out0pn, _pn, _k0);
_out1p = vfmaq_f32(_out1p, _p, _k1);
_out1pn = vfmaq_f32(_out1pn, _pn, _k1);
_out2p = vfmaq_f32(_out2p, _p, _k2);
_out2pn = vfmaq_f32(_out2pn, _pn, _k2);
_out3p = vfmaq_f32(_out3p, _p, _k3);
_out3pn = vfmaq_f32(_out3pn, _pn, _k3);
vst1q_f32(outptr0, _out0p);
vst1q_f32(outptr0+4, _out0pn);
vst1q_f32(outptr1, _out1p);
vst1q_f32(outptr1+4, _out1pn);
vst1q_f32(outptr2, _out2p);
vst1q_f32(outptr2+4, _out2pn);
vst1q_f32(outptr3, _out3p);
vst1q_f32(outptr3+4, _out3pn);
r0 += 8;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%5, #256] \n"
"vld1.f32 {d12-d15}, [%5 :128]! \n"
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"
"vmla.f32 q8, q6, %q12 \n"
"vmla.f32 q9, q7, %q12 \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"
"vmla.f32 q10, q6, %q13 \n"
"vmla.f32 q11, q7, %q13 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128] \n"
"vmla.f32 q12, q6, %q14 \n"
"vmla.f32 q13, q7, %q14 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128] \n"
"vmla.f32 q14, q6, %q15 \n"
"vmla.f32 q15, q7, %q15 \n"
"vst1.f32 {d24-d27}, [%3 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d12-d15}, [%5 :128]! \n"
"subs %0, #1 \n"
"vst1.f32 {d28-d31}, [%4 :128]! \n"
"bne 0b \n"
"sub %5, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(r0) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"w"(_k0), // %12
"w"(_k1), // %13
"w"(_k2), // %14
"w"(_k3) // %15
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
// TODO neon optimize
float sum0 = *r0 * k0;
float sum1 = *r0 * k1;
float sum2 = *r0 * k2;
float sum3 = *r0 * k3;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
r0++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
}
#pragma omp parallel for
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
int q = 0;
for (; q+3<inch; q+=4)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* img1 = bottom_blob.channel(q+1);
const float* img2 = bottom_blob.channel(q+2);
const float* img3 = bottom_blob.channel(q+3);
const float* kernel0 = kernel + p*inch + q;
const float k0 = kernel0[0];
const float k1 = kernel0[1];
const float k2 = kernel0[2];
const float k3 = kernel0[3];
const float* r0 = img0;
const float* r1 = img1;
const float* r2 = img2;
const float* r3 = img3;
int size = outw * outh;
#if __ARM_NEON
int nn = size >> 3;
int remain = size & 7;
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(k0);
float32x4_t _k1 = vdupq_n_f32(k1);
float32x4_t _k2 = vdupq_n_f32(k2);
float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(r0);
float32x4_t _pn = vld1q_f32(r0+4);
float32x4_t _outp = vld1q_f32(outptr);
float32x4_t _outpn = vld1q_f32(outptr+4);
_outp = vfmaq_f32(_outp, _p, _k0);
_outpn = vfmaq_f32(_outpn, _pn, _k0);
float32x4_t _p1 = vld1q_f32(r1);
float32x4_t _p1n = vld1q_f32(r1+4);
_outp = vfmaq_f32(_outp, _p1, _k1);
_outpn = vfmaq_f32(_outpn, _p1n, _k1);
float32x4_t _p2 = vld1q_f32(r2);
float32x4_t _p2n = vld1q_f32(r2+4);
_outp = vfmaq_f32(_outp, _p2, _k2);
_outpn = vfmaq_f32(_outpn, _p2n, _k2);
float32x4_t _p3 = vld1q_f32(r3);
float32x4_t _p3n = vld1q_f32(r3+4);
_outp = vfmaq_f32(_outp, _p3, _k3);
_outpn = vfmaq_f32(_outpn, _p3n, _k3);
vst1q_f32(outptr, _outp);
vst1q_f32(outptr+4, _outpn);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
outptr += 8;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2 :128]! \n"
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128] \n"
"vmla.f32 q0, q2, %q12 \n"
"vmla.f32 q1, q3, %q12 \n"
"pld [%3, #256] \n"
"vld1.f32 {d4-d7}, [%3 :128]! \n"
"vmla.f32 q0, q2, %q13 \n"
"vmla.f32 q1, q3, %q13 \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4 :128]! \n"
"vmla.f32 q0, q2, %q14 \n"
"vmla.f32 q1, q3, %q14 \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q0, q2, %q15 \n"
"vmla.f32 q1, q3, %q15 \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2 :128]! \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k0), // %12
"w"(_k1), // %13
"w"(_k2), // %14
"w"(_k3) // %15
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = *r0 * k0;
float sum1 = *r1 * k1;
float sum2 = *r2 * k2;
float sum3 = *r3 * k3;
*outptr += sum + sum1 + sum2 + sum3;
r0++;
r1++;
r2++;
r3++;
outptr++;
}
}
for (; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch + q;
const float k0 = kernel0[0];
const float* r0 = img0;
int size = outw * outh;
#if __ARM_NEON
int nn = size >> 3;
int remain = size & 7;
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(k0);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(r0);
float32x4_t _outp = vld1q_f32(outptr);
float32x4_t _pn = vld1q_f32(r0+4);
float32x4_t _outpn = vld1q_f32(outptr+4);
_outp = vfmaq_f32(_outp, _p, _k0);
_outpn = vfmaq_f32(_outpn, _pn, _k0);
vst1q_f32(outptr, _outp);
vst1q_f32(outptr+4, _outpn);
r0 += 8;
outptr += 8;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2 :128]! \n"
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128] \n"
"vmla.f32 q0, q2, %q6 \n"
"vmla.f32 q1, q3, %q6 \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2 :128]! \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0) // %2
: "0"(nn),
"1"(outptr),
"2"(r0),
"w"(_k0) // %6
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = *r0 * k0;
*outptr += sum;
r0++;
outptr++;
}
}
}
}
// conv1x1s2: pointwise (1x1) convolution with stride 2, NEON-optimized.
// bottom_blob: input feature maps (w x h x inch); top_blob: output
// (outw x outh x outch), pre-sized by the caller; _kernel: weights stored
// row-major as kernel[p*inch + q] for output channel p and input channel q;
// _bias: optional per-output-channel bias (may be empty -> bias is NULL).
static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pointer advance at the end of each output row: (w - 2*outw) skips the
// unread tail of the current input row, the extra +w skips the next input
// row entirely (vertical stride 2)
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
// output channels are processed four at a time; channels left over when
// outch is not a multiple of 4 go through the single-channel loop below
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
Mat out2 = top_blob.channel(p+2);
Mat out3 = top_blob.channel(p+3);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
const float bias2 = bias ? bias[p+2] : 0.f;
const float bias3 = bias ? bias[p+3] : 0.f;
// seed outputs with the bias; the channel loops accumulate into them
out0.fill(bias0);
out1.fill(bias1);
out2.fill(bias2);
out3.fill(bias3);
int q = 0;
// consume input channels four at a time (4-out x 4-in weight tile)
for (; q+3<inch; q+=4)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr2 = out2;
float* outptr3 = out3;
const float* img0 = bottom_blob.channel(q);
const float* img1 = bottom_blob.channel(q+1);
const float* img2 = bottom_blob.channel(q+2);
const float* img3 = bottom_blob.channel(q+3);
const float* kernel0 = kernel + p*inch + q;
const float* kernel1 = kernel + (p+1)*inch + q;
const float* kernel2 = kernel + (p+2)*inch + q;
const float* kernel3 = kernel + (p+3)*inch + q;
const float* r0 = img0;
const float* r1 = img1;
const float* r2 = img2;
const float* r3 = img3;
for (int i = 0; i < outh; i++)
{
int size = outw;
#if __ARM_NEON
// 8 output pixels per vector iteration, scalar tail for the rest
int nn = size >> 3;
int remain = size & 7;
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
// _kN holds the 4 weights (one per input channel q..q+3) of out channel p+N
float32x4_t _k0 = vld1q_f32(kernel0);
float32x4_t _k1 = vld1q_f32(kernel1);
float32x4_t _k2 = vld1q_f32(kernel2);
float32x4_t _k3 = vld1q_f32(kernel3);
#if __aarch64__
// vld2q_f32 de-interleaves the stride-2 input: val[0] holds the
// even-indexed samples actually consumed by the convolution
for (; nn>0; nn--)
{
float32x4x2_t _px2 = vld2q_f32(r0);
float32x4_t _p = _px2.val[0];
float32x4x2_t _pnx2 = vld2q_f32(r0+8);
float32x4_t _pn = _pnx2.val[0];
float32x4_t _out0p = vld1q_f32(outptr0);
float32x4_t _out0pn = vld1q_f32(outptr0+4);
float32x4_t _out1p = vld1q_f32(outptr1);
float32x4_t _out1pn = vld1q_f32(outptr1+4);
float32x4_t _out2p = vld1q_f32(outptr2);
float32x4_t _out2pn = vld1q_f32(outptr2+4);
float32x4_t _out3p = vld1q_f32(outptr3);
float32x4_t _out3pn = vld1q_f32(outptr3+4);
_out0p = vfmaq_laneq_f32(_out0p, _p, _k0, 0);
_out0pn = vfmaq_laneq_f32(_out0pn, _pn, _k0, 0);
_out1p = vfmaq_laneq_f32(_out1p, _p, _k1, 0);
_out1pn = vfmaq_laneq_f32(_out1pn, _pn, _k1, 0);
_out2p = vfmaq_laneq_f32(_out2p, _p, _k2, 0);
_out2pn = vfmaq_laneq_f32(_out2pn, _pn, _k2, 0);
_out3p = vfmaq_laneq_f32(_out3p, _p, _k3, 0);
_out3pn = vfmaq_laneq_f32(_out3pn, _pn, _k3, 0);
float32x4x2_t _p1x2 = vld2q_f32(r1);
float32x4_t _p1 = _p1x2.val[0];
float32x4x2_t _p1nx2 = vld2q_f32(r1+8);
float32x4_t _p1n = _p1nx2.val[0];
_out0p = vfmaq_laneq_f32(_out0p, _p1, _k0, 1);
_out0pn = vfmaq_laneq_f32(_out0pn, _p1n, _k0, 1);
_out1p = vfmaq_laneq_f32(_out1p, _p1, _k1, 1);
_out1pn = vfmaq_laneq_f32(_out1pn, _p1n, _k1, 1);
_out2p = vfmaq_laneq_f32(_out2p, _p1, _k2, 1);
_out2pn = vfmaq_laneq_f32(_out2pn, _p1n, _k2, 1);
_out3p = vfmaq_laneq_f32(_out3p, _p1, _k3, 1);
_out3pn = vfmaq_laneq_f32(_out3pn, _p1n, _k3, 1);
float32x4x2_t _p2x2 = vld2q_f32(r2);
float32x4_t _p2 = _p2x2.val[0];
float32x4x2_t _p2nx2 = vld2q_f32(r2+8);
float32x4_t _p2n = _p2nx2.val[0];
_out0p = vfmaq_laneq_f32(_out0p, _p2, _k0, 2);
_out0pn = vfmaq_laneq_f32(_out0pn, _p2n, _k0, 2);
_out1p = vfmaq_laneq_f32(_out1p, _p2, _k1, 2);
_out1pn = vfmaq_laneq_f32(_out1pn, _p2n, _k1, 2);
_out2p = vfmaq_laneq_f32(_out2p, _p2, _k2, 2);
_out2pn = vfmaq_laneq_f32(_out2pn, _p2n, _k2, 2);
_out3p = vfmaq_laneq_f32(_out3p, _p2, _k3, 2);
_out3pn = vfmaq_laneq_f32(_out3pn, _p2n, _k3, 2);
float32x4x2_t _p3x2 = vld2q_f32(r3);
float32x4_t _p3 = _p3x2.val[0];
float32x4x2_t _p3nx2 = vld2q_f32(r3+8);
float32x4_t _p3n = _p3nx2.val[0];
_out0p = vfmaq_laneq_f32(_out0p, _p3, _k0, 3);
_out0pn = vfmaq_laneq_f32(_out0pn, _p3n, _k0, 3);
_out1p = vfmaq_laneq_f32(_out1p, _p3, _k1, 3);
_out1pn = vfmaq_laneq_f32(_out1pn, _p3n, _k1, 3);
_out2p = vfmaq_laneq_f32(_out2p, _p3, _k2, 3);
_out2pn = vfmaq_laneq_f32(_out2pn, _p3n, _k2, 3);
_out3p = vfmaq_laneq_f32(_out3p, _p3, _k3, 3);
_out3pn = vfmaq_laneq_f32(_out3pn, _p3n, _k3, 3);
vst1q_f32(outptr0, _out0p);
vst1q_f32(outptr0+4, _out0pn);
vst1q_f32(outptr1, _out1p);
vst1q_f32(outptr1+4, _out1pn);
vst1q_f32(outptr2, _out2p);
vst1q_f32(outptr2+4, _out2pn);
vst1q_f32(outptr3, _out3p);
vst1q_f32(outptr3+4, _out3pn);
// inputs advance by 16 (8 output pixels * stride 2), outputs by 8
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
}
#else
// armv7: same 8-pixel kernel as the intrinsics loop above, in inline
// assembly; "vand qX, qY, qY" serves as a register-to-register move of
// the de-interleaved even lanes
if (nn > 0)
{
asm volatile(
"0:                             \n"
"pld        [%5, #512]          \n"
"vld2.f32   {d8-d11}, [%5]!     \n"
"vld2.f32   {d12-d15}, [%5]!    \n"
"vand       q5, q6, q6          \n"// q4 q5
"pld        [%1, #256]          \n"
"vld1.f32   {d16-d19}, [%1]     \n"
"vmla.f32   q8, q4, %e18[0]     \n"
"vmla.f32   q9, q5, %e18[0]     \n"
"pld        [%2, #256]          \n"
"vld1.f32   {d20-d23}, [%2]     \n"
"vmla.f32   q10, q4, %e19[0]    \n"
"vmla.f32   q11, q5, %e19[0]    \n"
"pld        [%3, #256]          \n"
"vld1.f32   {d24-d27}, [%3]     \n"
"vmla.f32   q12, q4, %e20[0]    \n"
"vmla.f32   q13, q5, %e20[0]    \n"
"pld        [%4, #256]          \n"
"vld1.f32   {d28-d31}, [%4]     \n"
"pld        [%6, #512]          \n"
"vld2.f32   {d12-d15}, [%6]!    \n"
"vmla.f32   q14, q4, %e21[0]    \n"
"vmla.f32   q15, q5, %e21[0]    \n"
"vld2.f32   {d8-d11}, [%6]!     \n"
"vand       q7, q4, q4          \n"// q6 q7
"vmla.f32   q8, q6, %e18[1]     \n"
"vmla.f32   q9, q7, %e18[1]     \n"
"vmla.f32   q10, q6, %e19[1]    \n"
"vmla.f32   q11, q7, %e19[1]    \n"
"vmla.f32   q12, q6, %e20[1]    \n"
"vmla.f32   q13, q7, %e20[1]    \n"
"pld        [%7, #512]          \n"
"vld2.f32   {d8-d11}, [%7]!     \n"
"vmla.f32   q14, q6, %e21[1]    \n"
"vmla.f32   q15, q7, %e21[1]    \n"
"vld2.f32   {d12-d15}, [%7]!    \n"
"vand       q5, q6, q6          \n"// q4 q5
"vmla.f32   q8, q4, %f18[0]     \n"
"vmla.f32   q9, q5, %f18[0]     \n"
"vmla.f32   q10, q4, %f19[0]    \n"
"vmla.f32   q11, q5, %f19[0]    \n"
"vmla.f32   q12, q4, %f20[0]    \n"
"vmla.f32   q13, q5, %f20[0]    \n"
"pld        [%8, #512]          \n"
"vld2.f32   {d12-d15}, [%8]!    \n"
"vmla.f32   q14, q4, %f21[0]    \n"
"vmla.f32   q15, q5, %f21[0]    \n"
"vld2.f32   {d8-d11}, [%8]!     \n"
"vand       q7, q4, q4          \n"// q6 q7
"vmla.f32   q8, q6, %f18[1]     \n"
"vmla.f32   q9, q7, %f18[1]     \n"
"vmla.f32   q10, q6, %f19[1]    \n"
"vmla.f32   q11, q7, %f19[1]    \n"
"vst1.f32   {d16-d19}, [%1]!    \n"
"vmla.f32   q12, q6, %f20[1]    \n"
"vmla.f32   q13, q7, %f20[1]    \n"
"vst1.f32   {d20-d23}, [%2]!    \n"
"vmla.f32   q14, q6, %f21[1]    \n"
"vmla.f32   q15, q7, %f21[1]    \n"
"vst1.f32   {d24-d27}, [%3]!    \n"
"subs       %0, #1              \n"
"vst1.f32   {d28-d31}, [%4]!    \n"
"bne        0b                  \n"
: "=r"(nn),     // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(r0),     // %5
"=r"(r1),     // %6
"=r"(r2),     // %7
"=r"(r3)      // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k0),     // %18
"w"(_k1),     // %19
"w"(_k2),     // %20
"w"(_k3)      // %21
: "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// scalar tail: one output pixel at a time, input pointers step by 2
for (; remain>0; remain--)
{
// TODO neon optimize
float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3];
float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3];
float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3];
float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
// jump to the start of the next stride-2 input row
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
}
}
// leftover input channels (inch not a multiple of 4), one at a time
for (; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr2 = out2;
float* outptr3 = out3;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch + q;
const float* kernel1 = kernel + (p+1)*inch + q;
const float* kernel2 = kernel + (p+2)*inch + q;
const float* kernel3 = kernel + (p+3)*inch + q;
const float k0 = kernel0[0];
const float k1 = kernel1[0];
const float k2 = kernel2[0];
const float k3 = kernel3[0];
const float* r0 = img0;
for (int i = 0; i < outh; i++)
{
int size = outw;
#if __ARM_NEON
int nn = size >> 3;
int remain = size & 7;
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
// broadcast each scalar weight across a vector lane set
float32x4_t _k0 = vdupq_n_f32(k0);
float32x4_t _k1 = vdupq_n_f32(k1);
float32x4_t _k2 = vdupq_n_f32(k2);
float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4x2_t _px2 = vld2q_f32(r0);
float32x4_t _p = _px2.val[0];
float32x4x2_t _pnx2 = vld2q_f32(r0+8);
float32x4_t _pn = _pnx2.val[0];
float32x4_t _out0p = vld1q_f32(outptr0);
float32x4_t _out0pn = vld1q_f32(outptr0+4);
float32x4_t _out1p = vld1q_f32(outptr1);
float32x4_t _out1pn = vld1q_f32(outptr1+4);
float32x4_t _out2p = vld1q_f32(outptr2);
float32x4_t _out2pn = vld1q_f32(outptr2+4);
float32x4_t _out3p = vld1q_f32(outptr3);
float32x4_t _out3pn = vld1q_f32(outptr3+4);
_out0p = vfmaq_f32(_out0p, _p, _k0);
_out0pn = vfmaq_f32(_out0pn, _pn, _k0);
_out1p = vfmaq_f32(_out1p, _p, _k1);
_out1pn = vfmaq_f32(_out1pn, _pn, _k1);
_out2p = vfmaq_f32(_out2p, _p, _k2);
_out2pn = vfmaq_f32(_out2pn, _pn, _k2);
_out3p = vfmaq_f32(_out3p, _p, _k3);
_out3pn = vfmaq_f32(_out3pn, _pn, _k3);
vst1q_f32(outptr0, _out0p);
vst1q_f32(outptr0+4, _out0pn);
vst1q_f32(outptr1, _out1p);
vst1q_f32(outptr1+4, _out1pn);
vst1q_f32(outptr2, _out2p);
vst1q_f32(outptr2+4, _out2pn);
vst1q_f32(outptr3, _out3p);
vst1q_f32(outptr3+4, _out3pn);
r0 += 16;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
}
#else
// armv7 inline-assembly version of the single-input-channel loop
if (nn > 0)
{
asm volatile(
"0:                             \n"
"pld        [%5, #512]          \n"
"vld2.f32   {d8-d11}, [%5]!     \n"
"vld2.f32   {d12-d15}, [%5]!    \n"
"vand       q5, q6, q6          \n"// q4 q5
"pld        [%1, #256]          \n"
"vld1.f32   {d16-d19}, [%1]     \n"
"vmla.f32   q8, q4, %q12        \n"
"vmla.f32   q9, q5, %q12        \n"
"pld        [%2, #256]          \n"
"vld1.f32   {d20-d23}, [%2]     \n"
"vmla.f32   q10, q4, %q13       \n"
"vmla.f32   q11, q5, %q13       \n"
"pld        [%3, #256]          \n"
"vld1.f32   {d24-d27}, [%3]     \n"
"vst1.f32   {d16-d19}, [%1]!    \n"
"vmla.f32   q12, q4, %q14       \n"
"vmla.f32   q13, q5, %q14       \n"
"pld        [%4, #256]          \n"
"vld1.f32   {d28-d31}, [%4]     \n"
"vst1.f32   {d20-d23}, [%2]!    \n"
"vmla.f32   q14, q4, %q15       \n"
"vmla.f32   q15, q5, %q15       \n"
"vst1.f32   {d24-d27}, [%3]!    \n"
"subs       %0, #1              \n"
"vst1.f32   {d28-d31}, [%4]!    \n"
"bne        0b                  \n"
: "=r"(nn),     // %0
"=r"(outptr0),// %1
"=r"(outptr1),// %2
"=r"(outptr2),// %3
"=r"(outptr3),// %4
"=r"(r0)      // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"w"(_k0),     // %12
"w"(_k1),     // %13
"w"(_k2),     // %14
"w"(_k3)      // %15
: "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
// TODO neon optimize
float sum0 = *r0 * k0;
float sum1 = *r0 * k1;
float sum2 = *r0 * k2;
float sum3 = *r0 * k3;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
r0 += 2;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
r0 += tailstep;
}
}
}
// leftover output channels (outch not a multiple of 4), one at a time
#pragma omp parallel for
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
int q = 0;
// four input channels at a time feeding one output channel
for (; q+3<inch; q+=4)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* img1 = bottom_blob.channel(q+1);
const float* img2 = bottom_blob.channel(q+2);
const float* img3 = bottom_blob.channel(q+3);
const float* kernel0 = kernel + p*inch + q;
const float k0 = kernel0[0];
const float k1 = kernel0[1];
const float k2 = kernel0[2];
const float k3 = kernel0[3];
const float* r0 = img0;
const float* r1 = img1;
const float* r2 = img2;
const float* r3 = img3;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(k0);
float32x4_t _k1 = vdupq_n_f32(k1);
float32x4_t _k2 = vdupq_n_f32(k2);
float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4x2_t _px2 = vld2q_f32(r0);
float32x4_t _p = _px2.val[0];
float32x4_t _outp = vld1q_f32(outptr);
float32x4x2_t _pnx2 = vld2q_f32(r0+8);
float32x4_t _pn = _pnx2.val[0];
float32x4_t _outpn = vld1q_f32(outptr+4);
_outp = vmlaq_f32(_outp, _p, _k0);
_outpn = vmlaq_f32(_outpn, _pn, _k0);
float32x4x2_t _p1x2 = vld2q_f32(r1);
float32x4_t _p1 = _p1x2.val[0];
float32x4x2_t _p1nx2 = vld2q_f32(r1+8);
float32x4_t _p1n = _p1nx2.val[0];
_outp = vmlaq_f32(_outp, _p1, _k1);
_outpn = vmlaq_f32(_outpn, _p1n, _k1);
float32x4x2_t _p2x2 = vld2q_f32(r2);
float32x4_t _p2 = _p2x2.val[0];
float32x4x2_t _p2nx2 = vld2q_f32(r2+8);
float32x4_t _p2n = _p2nx2.val[0];
_outp = vmlaq_f32(_outp, _p2, _k2);
_outpn = vmlaq_f32(_outpn, _p2n, _k2);
float32x4x2_t _p3x2 = vld2q_f32(r3);
float32x4_t _p3 = _p3x2.val[0];
float32x4x2_t _p3nx2 = vld2q_f32(r3+8);
float32x4_t _p3n = _p3nx2.val[0];
_outp = vmlaq_f32(_outp, _p3, _k3);
_outpn = vmlaq_f32(_outpn, _p3n, _k3);
vst1q_f32(outptr, _outp);
vst1q_f32(outptr+4, _outpn);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
outptr += 8;
}
#else
// armv7 inline-assembly version; note the pre-loop load and the
// matching "sub %2, #64" rewind after the final over-read
if (nn > 0)
{
asm volatile(
"pld        [%2, #512]          \n"
"vld2.f32   {d4-d7}, [%2]!      \n"
"vld2.f32   {d16-d19}, [%2]!    \n"
"0:                             \n"
"pld        [%1, #256]          \n"
"vld1.f32   {d0-d3}, [%1]       \n"
"vmla.f32   q0, q2, %q12        \n"
"vmla.f32   q1, q8, %q12        \n"
"pld        [%3, #512]          \n"
"vld2.f32   {d4-d7}, [%3]!      \n"
"vld2.f32   {d16-d19}, [%3]!    \n"
"vmla.f32   q0, q2, %q13        \n"
"vmla.f32   q1, q8, %q13        \n"
"pld        [%4, #512]          \n"
"vld2.f32   {d4-d7}, [%4]!      \n"
"vld2.f32   {d16-d19}, [%4]!    \n"
"vmla.f32   q0, q2, %q14        \n"
"vmla.f32   q1, q8, %q14        \n"
"pld        [%5, #512]          \n"
"vld2.f32   {d4-d7}, [%5]!      \n"
"vld2.f32   {d16-d19}, [%5]!    \n"
"vmla.f32   q0, q2, %q15        \n"
"vmla.f32   q1, q8, %q15        \n"
"pld        [%2, #512]          \n"
"vld2.f32   {d4-d7}, [%2]!      \n"
"vld2.f32   {d16-d19}, [%2]!    \n"
"subs       %0, #1              \n"
"vst1.f32   {d0-d3}, [%1]!      \n"
"bne        0b                  \n"
"sub        %2, #64             \n"
: "=r"(nn),     // %0
"=r"(outptr), // %1
"=r"(r0),     // %2
"=r"(r1),     // %3
"=r"(r2),     // %4
"=r"(r3)      // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k0),     // %12
"w"(_k1),     // %13
"w"(_k2),     // %14
"w"(_k3)      // %15
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = *r0 * k0;
float sum1 = *r1 * k1;
float sum2 = *r2 * k2;
float sum3 = *r3 * k3;
*outptr += sum + sum1 + sum2 + sum3;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
}
}
// leftover input channels for the single output channel
for (; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch + q;
const float k0 = kernel0[0];
const float* r0 = img0;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _k0 = vdupq_n_f32(k0);
#if __aarch64__
for (; nn>0; nn--)
{
float32x4x2_t _px2 = vld2q_f32(r0);
float32x4_t _p = _px2.val[0];
float32x4_t _outp = vld1q_f32(outptr);
float32x4x2_t _pnx2 = vld2q_f32(r0+8);
float32x4_t _pn = _pnx2.val[0];
float32x4_t _outpn = vld1q_f32(outptr+4);
_outp = vmlaq_f32(_outp, _p, _k0);
_outpn = vmlaq_f32(_outpn, _pn, _k0);
vst1q_f32(outptr, _outp);
vst1q_f32(outptr+4, _outpn);
r0 += 16;
outptr += 8;
}
#else
if (nn > 0)
{
asm volatile(
"pld        [%2, #512]          \n"
"vld2.f32   {d4-d7}, [%2]!      \n"
"vld2.f32   {d16-d19}, [%2]!    \n"
"0:                             \n"
"pld        [%1, #256]          \n"
"vld1.f32   {d0-d3}, [%1]       \n"
"vmla.f32   q0, q2, %q6         \n"
"vmla.f32   q1, q8, %q6         \n"
"pld        [%2, #512]          \n"
"vld2.f32   {d4-d7}, [%2]!      \n"
"vld2.f32   {d16-d19}, [%2]!    \n"
"subs       %0, #1              \n"
"vst1.f32   {d0-d3}, [%1]!      \n"
"bne        0b                  \n"
"sub        %2, #64             \n"
: "=r"(nn),     // %0
"=r"(outptr), // %1
"=r"(r0)      // %2
: "0"(nn),
"1"(outptr),
"2"(r0),
"w"(_k0)      // %6
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = *r0 * k0;
*outptr += sum;
r0 += 2;
outptr++;
}
r0 += tailstep;
}
}
}
}
|
Fig_12.17_ompVecPi.c | #include <omp.h>
#include <stdio.h>
static long num_steps = 100000;
float step;
/*
 * Estimate pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * vectorized with the OpenMP `simd` directive.  Uses the file-scope
 * globals `num_steps` (number of slices) and `step` (slice width).
 * Prints the estimate and returns 0.
 */
int main(void)
{
    int i;
    float x, pi, sum = 0.0;

    /* slice width; computed in float to match step's declared type
       (the previous (double) cast was immediately truncated anyway) */
    step = 1.0f / (float) num_steps;

    /* x is private per SIMD lane; the partial sums are combined with a
       `+` reduction across lanes */
    #pragma omp simd private(x) reduction(+:sum)
    for (i = 0; i < num_steps; i++) {
        x = (i + 0.5f) * step;
        sum += 4.0f / (1.0f + x * x);
    }
    pi = step * sum;
    printf("pi=%lf \n", pi);
    return 0;   /* explicit success status */
}
|
broadcast_reduce-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015-2017 by Contributors
* \file broadcast_reduce-inl.h
* \brief CPU-specific Function definition of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"
#include "../operator_common.h"
namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;
const int MAX_DIM = 5;
// Compute broadcast-aware row-major strides for `shape`: an axis of extent 1
// gets stride 0 (indexing along it always lands on element 0), every other
// axis gets the usual cumulative product of the inner extents.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t acc = 1;
  #pragma unroll
  for (int axis = ndim - 1; axis >= 0; --axis) {
    if (shape[axis] > 1) {
      stride[axis] = acc;
    } else {
      stride[axis] = 0;
    }
    acc *= shape[axis];
  }
  return stride;
}
// Decompose flat index `idx` (relative to `shape`) into per-axis coordinates
// and simultaneously accumulate the dot product of those coordinates with two
// stride vectors, writing the resulting flat offsets to *j and *k.
template<int ndim>
MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) {
  index_t off_j = 0;
  index_t off_k = 0;
  index_t rest = idx;
  #pragma unroll
  for (index_t axis = ndim - 1; axis >= 0; --axis) {
    const auto quot = rest / shape[axis];
    const auto coord = rest - quot * shape[axis];  // rest mod shape[axis]
    off_j += coord * stridej[axis];
    off_k += coord * stridek[axis];
    rest = quot;
  }
  *j = off_j;
  *k = off_k;
}
// Convert a flat index into per-axis coordinates for `shape`
// (inverse of ravel for shapes without broadcast axes).
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> coord;
  index_t rem = idx;
  #pragma unroll
  for (index_t axis = ndim - 1; axis >= 0; --axis) {
    auto next = rem / shape[axis];
    coord[axis] = rem - next * shape[axis];  // remainder along this axis
    rem = next;
  }
  return coord;
}
// Flatten per-axis coordinates into a flat index for `shape`.
// Axes of extent 1 are broadcast axes: their coordinate is ignored.
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t idx = 0;
  #pragma unroll
  for (index_t axis = 0; axis < ndim; ++axis) {
    idx *= shape[axis];
    if (shape[axis] > 1) {
      idx += coord[axis];
    }
  }
  return idx;
}
// Identifies the axes over which `big` must be reduced to produce `small`.
// The extents of those axes (taken from `big`) are packed into the leading
// entries of *dims and their row-major strides within `big` into the leading
// entries of *stride; all remaining entries are set to 1.
// Returns the number of mismatching (reduced) axes.
template<int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims,
                         Shape<ndim>* stride) {
  int mdim = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    // Count mismatching axes and default every slot to extent/stride 1.
    mdim += small[i] != big[i];
    (*dims)[i] = (*stride)[i] = 1;
  }
  #pragma unroll
  // Walk from the innermost axis outward; `s` is the running row-major
  // stride of axis i in `big`. Mismatching axes fill dims/stride from the
  // back (j counts down from mdim) so they end up in original axis order.
  for (int i = ndim-1, j = mdim, s = 1; i >= 0; --i) {
    if (small[i] != big[i]) {
      --j;
      (*stride)[j] = s;
      (*dims)[j] = big[i];
    }
    s *= big[i];
  }
  return mdim;
}
// Fused unravel + dot: decomposes the flat index `idx` by `shape` and
// returns the dot product of the resulting coordinate with `stride`,
// without materializing the coordinate.
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
                                    const Shape<ndim>& stride) {
  index_t acc = 0;
  #pragma unroll
  for (index_t dim = ndim - 1, rem = idx; dim >= 0; --dim) {
    const index_t quot = rem / shape[dim];
    acc += (rem - quot * shape[dim]) * stride[dim];
    rem = quot;
  }
  return acc;
}
// Inner product of a coordinate with a stride vector: the flat offset of
// `coord` in a tensor laid out with strides `stride`.
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t offset = 0;
  #pragma unroll
  for (int d = 0; d < ndim; ++d) {
    offset += coord[d] * stride[d];
  }
  return offset;
}
// Writes `src` into *dst, either accumulating (addto == true, the kAddTo
// request path) or overwriting.
template<typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
  if (!addto) {
    *dst = src;
  } else {
    *dst += src;
  }
}
// Computes one element of a broadcast binary op:
// out[idx] (=|+=) OP(lhs[...], rhs[...]). The lhs/rhs offsets come from
// re-raveling the output coordinate into each input's shape; broadcast
// axes collapse to offset 0 via ravel's extent-> 1 test.
template<int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto,
                                             const DType* __restrict lhs,
                                             const DType* __restrict rhs, DType* out,
                                             const Shape<ndim>& lshape, const Shape<ndim>& rshape,
                                             const Shape<ndim>& oshape) {
  const Shape<ndim> out_coord = unravel(idx, oshape);
  const index_t lpos = ravel(out_coord, lshape);
  const index_t rpos = ravel(out_coord, rshape);
  assign(&out[idx], addto, OP::Map(lhs[lpos], rhs[rpos]));
}
// Sequentially reduces M elements of `big` into small[idx]. The output
// coordinate fixes a base offset into `big`; the k-th reduced element is
// located by unraveling k over the reduced axes (rshape) and applying
// their strides (rstride).
template<typename Reducer, int ndim, typename DType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto,
                                       const DType* __restrict big, DType *small,
                                       const Shape<ndim>& bshape, const Shape<ndim>& sshape,
                                       const Shape<ndim>& rshape, const Shape<ndim>& rstride) {
  Shape<ndim> coord = unravel(idx, sshape);
  const index_t base = ravel(coord, bshape);
  DType acc, residual;  // residual: extra state for reducers carrying a correction term
  Reducer::SetInitValue(acc, residual);
  for (size_t k = 0; k < M; ++k) {
    coord = unravel(k, rshape);
    Reducer::Reduce(acc, OP::Map(big[base + dot(coord, rstride)]), residual);
  }
  Reducer::Finalize(acc, residual);
  assign(&small[idx], addto, acc);
}
#ifdef __CUDACC__
#include "broadcast_reduce-inl.cuh"
#else
template<int ndim, typename DType, typename OP>
void binary_broadcast_compute(const size_t N, const bool addto, const DType *lhs,
const DType *rhs, DType *out, const Shape<ndim> lshape,
const Shape<ndim> rshape, const Shape<ndim> oshape) {
for (size_t idx = 0; idx < N; ++idx) {
binary_broadcast_assign<ndim, DType, OP>(idx, addto, lhs, rhs, out, lshape, rshape, oshape);
}
}
// CPU entry point for a broadcast binary op: unpacks TBlobs into raw
// pointers and static shapes, honours kNullOp/kAddTo, and dispatches to
// binary_broadcast_compute.
template<int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req,
                                const TBlob& lhs, const TBlob& rhs, const TBlob& out) {
  if (req == kNullOp) return;
  const size_t nelem = out.shape_.Size();
  binary_broadcast_compute<ndim, DType, OP>(nelem, req == kAddTo,
      lhs.dptr<DType>(), rhs.dptr<DType>(), out.dptr<DType>(),
      lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>(), out.shape_.get<ndim>());
}
template<typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute(const size_t N, const size_t M, const bool addto,
const DType *big, DType *small, const Shape<ndim> bshape,
const Shape<ndim> sshape, const Shape<ndim> rshape,
const Shape<ndim> rstride) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign<Reducer, ndim, DType, OP>(idx, M, addto, big, small, bshape, sshape, rshape,
rstride);
}
}
// Parallel CPU reduction using precomputed offsets: ws_dptr[k] holds the
// offset of the k-th reduced element relative to the output's base index
// into `big` (filled by ReduceWithExtraMem), saving an unravel/dot pair per
// reduced element compared to seq_reduce_assign.
//
// Fix: call Reducer::Finalize before the final assign, matching
// seq_reduce_assign above. Without it, reducers whose Finalize is
// non-trivial produce wrong results on this path while the non-extra-mem
// path is correct.
template <typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto,
                                  const DType* big, DType* small,
                                  const Shape<ndim> bshape,
                                  const Shape<ndim> sshape,
                                  const Shape<ndim> rshape,
                                  const Shape<ndim> rstride,
                                  const index_t* ws_dptr) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    Shape<ndim> coord = unravel(idx, sshape);
    index_t j = ravel(coord, bshape);  // base offset of this output in `big`
    DType val, residual;
    Reducer::SetInitValue(val, residual);
    for (size_t k = 0; k < M; ++k) {
      Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual);
    }
    Reducer::Finalize(val, residual);  // added: keep parity with seq_reduce_assign
    assign(&small[idx], addto, val);
  }
}
// CPU Reduce entry point: reduces `big` into `small` over the axes on which
// the two shapes differ. The workspace parameter is unused on this path
// (ReduceWorkspaceSize reports 0 bytes for it).
template <typename Reducer, int ndim, typename DType, typename OP>
void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  const size_t num_out = small.shape_.Size();
  const size_t num_red = rshape.Size();
  seq_reduce_compute<Reducer, ndim, DType, OP>(
      num_out, num_red, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
      big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
}
// Variant of Reduce that first tabulates, in parallel, the offset of every
// reduced element into the caller-supplied workspace, then reduces via
// seq_reduce_compute_extra_mem. Trades workspace memory for avoiding the
// per-element unravel/dot inside the inner reduction loop.
template <typename Reducer, int ndim, typename DType, typename OP>
void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req,
                        const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  using namespace mxnet_op;
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  index_t* offsets = reinterpret_cast<index_t*>(workspace.dptr_);
  const size_t num_out = small.shape_.Size();
  const size_t num_red = rshape.Size();
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t k = 0; k < static_cast<index_t>(num_red); k++) {
    offsets[k] = dot(unravel(k, rshape), rstride);
  }
  seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>(
      num_out, num_red, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
      big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, offsets);
}
// Workspace query for the single-input CPU Reduce above: that path uses no
// scratch memory, so this overload reports zero bytes.
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
                           const TShape& big) {
  return 0;
}
// Workspace query for the fused (big, lhs, rhs) CPU Reduce below: that path
// also uses no scratch memory, so report zero bytes.
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
                           const TShape& big, const TShape& lhs, const TShape& rhs) {
  return 0;
}
// Sequentially reduces the M terms of the fused expression
// OP1(big[...], OP2(lhs[...], rhs[...])) into small[idx]. big, lhs and rhs
// may each broadcast differently against `small`, so each input carries its
// own base offset (idx_*0, from raveling the output coordinate into that
// input's full shape) and its own reduced shape/stride pair for locating
// the k-th term.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto,
                                       const DType* __restrict big, const DType* __restrict lhs,
                                       const DType* __restrict rhs, DType *small,
                                       const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0,
                                       const Shape<ndim>& rhs_shape0,
                                       const Shape<ndim>& small_shape, const Shape<ndim>& rshape,
                                       const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape,
                                       const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride,
                                       const Shape<ndim>& rhs_stride) {
  // Base offsets of this output element within each (possibly broadcast) input.
  Shape<ndim> coord = unravel(idx, small_shape);
  const index_t idx_big0 = ravel(coord, big_shape);
  const index_t idx_lhs0 = ravel(coord, lhs_shape0);
  const index_t idx_rhs0 = ravel(coord, rhs_shape0);
  DType val, residual;  // residual: extra state for reducers carrying a correction term
  Reducer::SetInitValue(val, residual);
  for (size_t k = 0; k < M; ++k) {
    // Locate the k-th reduced term independently in each input.
    Shape<ndim> coord_big = unravel(k, rshape);
    index_t idx_big = idx_big0 + dot(coord_big, rstride);
    Shape<ndim> coord_lhs = unravel(k, lhs_shape);
    index_t idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
    Shape<ndim> coord_rhs = unravel(k, rhs_shape);
    index_t idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
    Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, val);
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const size_t N, const size_t M, const bool addto,
const DType *big, const DType *lhs, const DType *rhs, DType *small,
const Shape<ndim> big_shape, const Shape<ndim> small_shape,
const Shape<ndim> rshape, const Shape<ndim> rstride,
const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride,
lhs_stride, rhs_stride);
}
}
// Fused-reduce entry point: computes small = reduce(OP1(big, OP2(lhs, rhs)))
// over the axes on which `small` differs from each input. Each of big, lhs
// and rhs gets its own reduced shape/stride pair from diff(), since they may
// broadcast differently. The workspace parameter is unused on the CPU path.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs,
            const TBlob& rhs) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  Shape<ndim> lhs_shape, lhs_stride;
  Shape<ndim> rhs_shape, rhs_stride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
  diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
  const size_t num_out = small.shape_.Size();
  const size_t num_red = rshape.Size();
  seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
      num_out, num_red, req == kAddTo,
      big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(),
      big.shape_.get<ndim>(), small.shape_.get<ndim>(),
      rshape, rstride,
      lhs_shape, lhs_stride,
      rhs_shape, rhs_stride,
      lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
|
x_solve-brisbane.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB BT code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header-brisbane.h"
#include "work_lhs.h"
//#include "timers.h"
//---------------------------------------------------------------------
//
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//
//---------------------------------------------------------------------
void x_solve()
{
int i, j, k, m, n, isize, z;
// double pivot, coeff;
int gp22, gp12;
// double temp1, temp2, temp3;
double fjacX[5][5][PROBLEM_SIZE+1][JMAXP-1][KMAX-1];
double njacX[5][5][PROBLEM_SIZE+1][JMAXP-1][KMAX-1];
double lhsX[5][5][3][PROBLEM_SIZE][JMAXP-1][KMAX-1];
double pivot,coeff,temp1, temp2, temp3;
gp22 = grid_points[2]-2;
gp12 = grid_points[1]-2;
//---------------------------------------------------------------------
// This function computes the left hand side in the xi-direction
//---------------------------------------------------------------------
isize = grid_points[0]-1;
brisbane_mem mem_fjacX;
brisbane_mem mem_njacX;
brisbane_mem mem_lhsX;
brisbane_mem_create(sizeof(double) * 5 * 5 * (PROBLEM_SIZE + 1) * (JMAXP - 1) * (KMAX - 1), &mem_fjacX);
brisbane_mem_create(sizeof(double) * 5 * 5 * (PROBLEM_SIZE + 1) * (JMAXP - 1) * (KMAX - 1), &mem_njacX);
brisbane_mem_create(sizeof(double) * 5 * 5 * 3 * (PROBLEM_SIZE) * (JMAXP - 1) * (KMAX - 1), &mem_lhsX);
//---------------------------------------------------------------------
// determine a (labeled f) and n jacobians
//---------------------------------------------------------------------
#pragma omp target data map(alloc:fjacX[:][:][:][:][:],njacX[:][:][:][:][:],lhsX[:][:][:][:][:][:])
//present(rho_i,u,qs,rhs,square)
{
size_t kernel_x_solve_0_off[2] = { 1, 0 };
size_t kernel_x_solve_0_idx[2] = { gp12, isize + 1 };
brisbane_kernel kernel_x_solve_0;
brisbane_kernel_create("x_solve_0", &kernel_x_solve_0);
brisbane_kernel_setmem(kernel_x_solve_0, 0, mem_rho_i, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_0, 1, mem_fjacX, brisbane_w);
brisbane_kernel_setmem(kernel_x_solve_0, 2, mem_njacX, brisbane_w);
brisbane_kernel_setmem(kernel_x_solve_0, 3, mem_u, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_0, 4, mem_qs, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_0, 5, mem_square, brisbane_r);
brisbane_kernel_setarg(kernel_x_solve_0, 6, sizeof(double), &c1);
brisbane_kernel_setarg(kernel_x_solve_0, 7, sizeof(double), &c2);
brisbane_kernel_setarg(kernel_x_solve_0, 8, sizeof(double), &c3c4);
brisbane_kernel_setarg(kernel_x_solve_0, 9, sizeof(double), &c1345);
brisbane_kernel_setarg(kernel_x_solve_0, 10, sizeof(double), &con43);
brisbane_kernel_setarg(kernel_x_solve_0, 11, sizeof(int), &gp22);
brisbane_task task0;
brisbane_task_create(&task0);
brisbane_task_kernel(task0, kernel_x_solve_0, 2, kernel_x_solve_0_off, kernel_x_solve_0_idx);
brisbane_task_submit(task0, brisbane_cpu, NULL, true);
#if 0
#pragma omp target teams distribute parallel for collapse(2) private(temp1,temp2,temp3,i,j,k)
for (i = 0; i <= isize; i++) {
for (j = 1; j <= gp12; j++) {
for (k = 1; k <= gp22; k++) {
temp1 = rho_i[k][j][i];
temp2 = temp1 * temp1;
temp3 = temp1 * temp2;
//-------------------------------------------------------------------
//
//-------------------------------------------------------------------
fjacX[0][0][i][j][k] = 0.0;
fjacX[0][1][i][j][k] = 1.0;
fjacX[0][2][i][j][k] = 0.0;
fjacX[0][3][i][j][k] = 0.0;
fjacX[0][4][i][j][k] = 0.0;
fjacX[1][0][i][j][k] = -(u[k][j][i][1] * temp2 * u[k][j][i][1])
+ c2 * qs[k][j][i];
fjacX[1][1][i][j][k] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] );
fjacX[1][2][i][j][k] = - c2 * ( u[k][j][i][2] * temp1 );
fjacX[1][3][i][j][k] = - c2 * ( u[k][j][i][3] * temp1 );
fjacX[1][4][i][j][k] = c2;
fjacX[2][0][i][j][k] = - ( u[k][j][i][1]*u[k][j][i][2] ) * temp2;
fjacX[2][1][i][j][k] = u[k][j][i][2] * temp1;
fjacX[2][2][i][j][k] = u[k][j][i][1] * temp1;
fjacX[2][3][i][j][k] = 0.0;
fjacX[2][4][i][j][k] = 0.0;
fjacX[3][0][i][j][k] = - ( u[k][j][i][1]*u[k][j][i][3] ) * temp2;
fjacX[3][1][i][j][k] = u[k][j][i][3] * temp1;
fjacX[3][2][i][j][k] = 0.0;
fjacX[3][3][i][j][k] = u[k][j][i][1] * temp1;
fjacX[3][4][i][j][k] = 0.0;
fjacX[4][0][i][j][k] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* ( u[k][j][i][1] * temp2 );
fjacX[4][1][i][j][k] = c1 * u[k][j][i][4] * temp1
- c2 * ( u[k][j][i][1]*u[k][j][i][1] * temp2 + qs[k][j][i] );
fjacX[4][2][i][j][k] = - c2 * ( u[k][j][i][2]*u[k][j][i][1] ) * temp2;
fjacX[4][3][i][j][k] = - c2 * ( u[k][j][i][3]*u[k][j][i][1] ) * temp2;
fjacX[4][4][i][j][k] = c1 * ( u[k][j][i][1] * temp1 );
njacX[0][0][i][j][k] = 0.0;
njacX[0][1][i][j][k] = 0.0;
njacX[0][2][i][j][k] = 0.0;
njacX[0][3][i][j][k] = 0.0;
njacX[0][4][i][j][k] = 0.0;
njacX[1][0][i][j][k] = - con43 * c3c4 * temp2 * u[k][j][i][1];
njacX[1][1][i][j][k] = con43 * c3c4 * temp1;
njacX[1][2][i][j][k] = 0.0;
njacX[1][3][i][j][k] = 0.0;
njacX[1][4][i][j][k] = 0.0;
njacX[2][0][i][j][k] = - c3c4 * temp2 * u[k][j][i][2];
njacX[2][1][i][j][k] = 0.0;
njacX[2][2][i][j][k] = c3c4 * temp1;
njacX[2][3][i][j][k] = 0.0;
njacX[2][4][i][j][k] = 0.0;
njacX[3][0][i][j][k] = - c3c4 * temp2 * u[k][j][i][3];
njacX[3][1][i][j][k] = 0.0;
njacX[3][2][i][j][k] = 0.0;
njacX[3][3][i][j][k] = c3c4 * temp1;
njacX[3][4][i][j][k] = 0.0;
njacX[4][0][i][j][k] = - ( con43 * c3c4
- c1345 ) * temp3 * (u[k][j][i][1]*u[k][j][i][1])
- ( c3c4 - c1345 ) * temp3 * (u[k][j][i][2]*u[k][j][i][2])
- ( c3c4 - c1345 ) * temp3 * (u[k][j][i][3]*u[k][j][i][3])
- c1345 * temp2 * u[k][j][i][4];
njacX[4][1][i][j][k] = ( con43 * c3c4
- c1345 ) * temp2 * u[k][j][i][1];
njacX[4][2][i][j][k] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][2];
njacX[4][3][i][j][k] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][3];
njacX[4][4][i][j][k] = ( c1345 ) * temp1;
}
}
}
#endif
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in x direction
//---------------------------------------------------------------------
// lhsX[k][j]init(lhsX[k][j], isize);
// zero the whole left hand side for starters
size_t kernel_x_solve_1_off[3] = { 0, 1, 1 };
size_t kernel_x_solve_1_idx[3] = { 5, gp12, gp22 };
brisbane_kernel kernel_x_solve_1;
brisbane_kernel_create("x_solve_1", &kernel_x_solve_1);
brisbane_kernel_setmem(kernel_x_solve_1, 0, mem_lhsX, brisbane_w);
brisbane_kernel_setarg(kernel_x_solve_1, 1, sizeof(int), &isize);
brisbane_task task1;
brisbane_task_create(&task1);
brisbane_task_kernel(task1, kernel_x_solve_1, 3, kernel_x_solve_1_off, kernel_x_solve_1_idx);
brisbane_task_submit(task1, brisbane_cpu, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for collapse(3) private(k,j,n,m)
#else
#pragma omp target teams distribute parallel for simd collapse(4)
#endif
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
for (n = 0; n < 5; n++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
for (m = 0; m < 5; m++){
lhsX[m][n][0][0][j][k] = 0.0;
lhsX[m][n][1][0][j][k] = 0.0;
lhsX[m][n][2][0][j][k] = 0.0;
lhsX[m][n][0][isize][j][k] = 0.0;
lhsX[m][n][1][isize][j][k] = 0.0;
lhsX[m][n][2][isize][j][k] = 0.0;
}
}
}
}
#endif
// next, set all diagonal values to 1. This is overkill, but convenient
size_t kernel_x_solve_2_off[2] = { 1, 1 };
size_t kernel_x_solve_2_idx[2] = { gp12, gp22 };
brisbane_kernel kernel_x_solve_2;
brisbane_kernel_create("x_solve_2", &kernel_x_solve_2);
brisbane_kernel_setmem(kernel_x_solve_2, 0, mem_lhsX, brisbane_w);
brisbane_kernel_setarg(kernel_x_solve_2, 1, sizeof(int), &isize);
brisbane_task task2;
brisbane_task_create(&task2);
brisbane_task_kernel(task2, kernel_x_solve_2, 2, kernel_x_solve_2_off, kernel_x_solve_2_idx);
brisbane_task_submit(task2, brisbane_cpu, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(k,j) // collapse(2)
#else
#pragma omp target teams distribute parallel for simd collapse(2)
#endif
for (k = 1; k <= gp22; k++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
for (j = 1; j <= gp12; j++) {
lhsX[0][0][1][0][j][k] = 1.0;
lhsX[0][0][1][isize][j][k] = 1.0;
lhsX[1][1][1][0][j][k] = 1.0;
lhsX[1][1][1][isize][j][k] = 1.0;
lhsX[2][2][1][0][j][k] = 1.0;
lhsX[2][2][1][isize][j][k] = 1.0;
lhsX[3][3][1][0][j][k] = 1.0;
lhsX[3][3][1][isize][j][k] = 1.0;
lhsX[4][4][1][0][j][k] = 1.0;
lhsX[4][4][1][isize][j][k] = 1.0;
}
}
#endif
size_t kernel_x_solve_3_off[3] = { 1, 1, 1 };
size_t kernel_x_solve_3_idx[3] = { gp22, gp12, isize - 1 };
brisbane_kernel kernel_x_solve_3;
brisbane_kernel_create("x_solve_3", &kernel_x_solve_3);
brisbane_kernel_setmem(kernel_x_solve_3, 0, mem_lhsX, brisbane_w);
brisbane_kernel_setmem(kernel_x_solve_3, 1, mem_fjacX, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_3, 2, mem_njacX, brisbane_r);
brisbane_kernel_setarg(kernel_x_solve_3, 3, sizeof(double), &dt);
brisbane_kernel_setarg(kernel_x_solve_3, 4, sizeof(double), &tx1);
brisbane_kernel_setarg(kernel_x_solve_3, 5, sizeof(double), &tx2);
brisbane_kernel_setarg(kernel_x_solve_3, 6, sizeof(double), &dx1);
brisbane_kernel_setarg(kernel_x_solve_3, 7, sizeof(double), &dx2);
brisbane_kernel_setarg(kernel_x_solve_3, 8, sizeof(double), &dx3);
brisbane_kernel_setarg(kernel_x_solve_3, 9, sizeof(double), &dx4);
brisbane_kernel_setarg(kernel_x_solve_3, 10, sizeof(double), &dx5);
brisbane_task task3;
brisbane_task_create(&task3);
brisbane_task_kernel(task3, kernel_x_solve_3, 3, kernel_x_solve_3_off, kernel_x_solve_3_idx);
brisbane_task_submit(task3, brisbane_cpu, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for collapse(2) private(j,k)
#else
#pragma omp target teams distribute parallel for simd collapse(3) private(temp1,temp2)
#endif
for (i = 1; i <= isize-1; i++) {
for (j = 1; j <= gp12; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(temp1, temp2)
#endif
for (k = 1; k <= gp22; k++) {
temp1 = dt * tx1;
temp2 = dt * tx2;
lhsX[0][0][AA][i][j][k] = - temp2 * fjacX[0][0][i-1][j][k]
- temp1 * njacX[0][0][i-1][j][k]
- temp1 * dx1;
lhsX[0][1][AA][i][j][k] = - temp2 * fjacX[0][1][i-1][j][k]
- temp1 * njacX[0][1][i-1][j][k];
lhsX[0][2][AA][i][j][k] = - temp2 * fjacX[0][2][i-1][j][k]
- temp1 * njacX[0][2][i-1][j][k];
lhsX[0][3][AA][i][j][k] = - temp2 * fjacX[0][3][i-1][j][k]
- temp1 * njacX[0][3][i-1][j][k];
lhsX[0][4][AA][i][j][k] = - temp2 * fjacX[0][4][i-1][j][k]
- temp1 * njacX[0][4][i-1][j][k];
lhsX[1][0][AA][i][j][k] = - temp2 * fjacX[1][0][i-1][j][k]
- temp1 * njacX[1][0][i-1][j][k];
lhsX[1][1][AA][i][j][k] = - temp2 * fjacX[1][1][i-1][j][k]
- temp1 * njacX[1][1][i-1][j][k]
- temp1 * dx2;
lhsX[1][2][AA][i][j][k] = - temp2 * fjacX[1][2][i-1][j][k]
- temp1 * njacX[1][2][i-1][j][k];
lhsX[1][3][AA][i][j][k] = - temp2 * fjacX[1][3][i-1][j][k]
- temp1 * njacX[1][3][i-1][j][k];
lhsX[1][4][AA][i][j][k] = - temp2 * fjacX[1][4][i-1][j][k]
- temp1 * njacX[1][4][i-1][j][k];
lhsX[2][0][AA][i][j][k] = - temp2 * fjacX[2][0][i-1][j][k]
- temp1 * njacX[2][0][i-1][j][k];
lhsX[2][1][AA][i][j][k] = - temp2 * fjacX[2][1][i-1][j][k]
- temp1 * njacX[2][1][i-1][j][k];
lhsX[2][2][AA][i][j][k] = - temp2 * fjacX[2][2][i-1][j][k]
- temp1 * njacX[2][2][i-1][j][k]
- temp1 * dx3;
lhsX[2][3][AA][i][j][k] = - temp2 * fjacX[2][3][i-1][j][k]
- temp1 * njacX[2][3][i-1][j][k];
lhsX[2][4][AA][i][j][k] = - temp2 * fjacX[2][4][i-1][j][k]
- temp1 * njacX[2][4][i-1][j][k];
lhsX[3][0][AA][i][j][k] = - temp2 * fjacX[3][0][i-1][j][k]
- temp1 * njacX[3][0][i-1][j][k];
lhsX[3][1][AA][i][j][k] = - temp2 * fjacX[3][1][i-1][j][k]
- temp1 * njacX[3][1][i-1][j][k];
lhsX[3][2][AA][i][j][k] = - temp2 * fjacX[3][2][i-1][j][k]
- temp1 * njacX[3][2][i-1][j][k];
lhsX[3][3][AA][i][j][k] = - temp2 * fjacX[3][3][i-1][j][k]
- temp1 * njacX[3][3][i-1][j][k]
- temp1 * dx4;
lhsX[3][4][AA][i][j][k] = - temp2 * fjacX[3][4][i-1][j][k]
- temp1 * njacX[3][4][i-1][j][k];
lhsX[4][0][AA][i][j][k] = - temp2 * fjacX[4][0][i-1][j][k]
- temp1 * njacX[4][0][i-1][j][k];
lhsX[4][1][AA][i][j][k] = - temp2 * fjacX[4][1][i-1][j][k]
- temp1 * njacX[4][1][i-1][j][k];
lhsX[4][2][AA][i][j][k] = - temp2 * fjacX[4][2][i-1][j][k]
- temp1 * njacX[4][2][i-1][j][k];
lhsX[4][3][AA][i][j][k] = - temp2 * fjacX[4][3][i-1][j][k]
- temp1 * njacX[4][3][i-1][j][k];
lhsX[4][4][AA][i][j][k] = - temp2 * fjacX[4][4][i-1][j][k]
- temp1 * njacX[4][4][i-1][j][k]
- temp1 * dx5;
lhsX[0][0][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[0][0][i][j][k]
+ temp1 * 2.0 * dx1;
lhsX[0][1][BB][i][j][k] = temp1 * 2.0 * njacX[0][1][i][j][k];
lhsX[0][2][BB][i][j][k] = temp1 * 2.0 * njacX[0][2][i][j][k];
lhsX[0][3][BB][i][j][k] = temp1 * 2.0 * njacX[0][3][i][j][k];
lhsX[0][4][BB][i][j][k] = temp1 * 2.0 * njacX[0][4][i][j][k];
lhsX[1][0][BB][i][j][k] = temp1 * 2.0 * njacX[1][0][i][j][k];
lhsX[1][1][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[1][1][i][j][k]
+ temp1 * 2.0 * dx2;
lhsX[1][2][BB][i][j][k] = temp1 * 2.0 * njacX[1][2][i][j][k];
lhsX[1][3][BB][i][j][k] = temp1 * 2.0 * njacX[1][3][i][j][k];
lhsX[1][4][BB][i][j][k] = temp1 * 2.0 * njacX[1][4][i][j][k];
lhsX[2][0][BB][i][j][k] = temp1 * 2.0 * njacX[2][0][i][j][k];
lhsX[2][1][BB][i][j][k] = temp1 * 2.0 * njacX[2][1][i][j][k];
lhsX[2][2][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[2][2][i][j][k]
+ temp1 * 2.0 * dx3;
lhsX[2][3][BB][i][j][k] = temp1 * 2.0 * njacX[2][3][i][j][k];
lhsX[2][4][BB][i][j][k] = temp1 * 2.0 * njacX[2][4][i][j][k];
lhsX[3][0][BB][i][j][k] = temp1 * 2.0 * njacX[3][0][i][j][k];
lhsX[3][1][BB][i][j][k] = temp1 * 2.0 * njacX[3][1][i][j][k];
lhsX[3][2][BB][i][j][k] = temp1 * 2.0 * njacX[3][2][i][j][k];
lhsX[3][3][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[3][3][i][j][k]
+ temp1 * 2.0 * dx4;
lhsX[3][4][BB][i][j][k] = temp1 * 2.0 * njacX[3][4][i][j][k];
lhsX[4][0][BB][i][j][k] = temp1 * 2.0 * njacX[4][0][i][j][k];
lhsX[4][1][BB][i][j][k] = temp1 * 2.0 * njacX[4][1][i][j][k];
lhsX[4][2][BB][i][j][k] = temp1 * 2.0 * njacX[4][2][i][j][k];
lhsX[4][3][BB][i][j][k] = temp1 * 2.0 * njacX[4][3][i][j][k];
lhsX[4][4][BB][i][j][k] = 1.0
+ temp1 * 2.0 * njacX[4][4][i][j][k]
+ temp1 * 2.0 * dx5;
lhsX[0][0][CC][i][j][k] = temp2 * fjacX[0][0][i+1][j][k]
- temp1 * njacX[0][0][i+1][j][k]
- temp1 * dx1;
lhsX[0][1][CC][i][j][k] = temp2 * fjacX[0][1][i+1][j][k]
- temp1 * njacX[0][1][i+1][j][k];
lhsX[0][2][CC][i][j][k] = temp2 * fjacX[0][2][i+1][j][k]
- temp1 * njacX[0][2][i+1][j][k];
lhsX[0][3][CC][i][j][k] = temp2 * fjacX[0][3][i+1][j][k]
- temp1 * njacX[0][3][i+1][j][k];
lhsX[0][4][CC][i][j][k] = temp2 * fjacX[0][4][i+1][j][k]
- temp1 * njacX[0][4][i+1][j][k];
lhsX[1][0][CC][i][j][k] = temp2 * fjacX[1][0][i+1][j][k]
- temp1 * njacX[1][0][i+1][j][k];
lhsX[1][1][CC][i][j][k] = temp2 * fjacX[1][1][i+1][j][k]
- temp1 * njacX[1][1][i+1][j][k]
- temp1 * dx2;
lhsX[1][2][CC][i][j][k] = temp2 * fjacX[1][2][i+1][j][k]
- temp1 * njacX[1][2][i+1][j][k];
lhsX[1][3][CC][i][j][k] = temp2 * fjacX[1][3][i+1][j][k]
- temp1 * njacX[1][3][i+1][j][k];
lhsX[1][4][CC][i][j][k] = temp2 * fjacX[1][4][i+1][j][k]
- temp1 * njacX[1][4][i+1][j][k];
lhsX[2][0][CC][i][j][k] = temp2 * fjacX[2][0][i+1][j][k]
- temp1 * njacX[2][0][i+1][j][k];
lhsX[2][1][CC][i][j][k] = temp2 * fjacX[2][1][i+1][j][k]
- temp1 * njacX[2][1][i+1][j][k];
lhsX[2][2][CC][i][j][k] = temp2 * fjacX[2][2][i+1][j][k]
- temp1 * njacX[2][2][i+1][j][k]
- temp1 * dx3;
lhsX[2][3][CC][i][j][k] = temp2 * fjacX[2][3][i+1][j][k]
- temp1 * njacX[2][3][i+1][j][k];
lhsX[2][4][CC][i][j][k] = temp2 * fjacX[2][4][i+1][j][k]
- temp1 * njacX[2][4][i+1][j][k];
lhsX[3][0][CC][i][j][k] = temp2 * fjacX[3][0][i+1][j][k]
- temp1 * njacX[3][0][i+1][j][k];
lhsX[3][1][CC][i][j][k] = temp2 * fjacX[3][1][i+1][j][k]
- temp1 * njacX[3][1][i+1][j][k];
lhsX[3][2][CC][i][j][k] = temp2 * fjacX[3][2][i+1][j][k]
- temp1 * njacX[3][2][i+1][j][k];
lhsX[3][3][CC][i][j][k] = temp2 * fjacX[3][3][i+1][j][k]
- temp1 * njacX[3][3][i+1][j][k]
- temp1 * dx4;
lhsX[3][4][CC][i][j][k] = temp2 * fjacX[3][4][i+1][j][k]
- temp1 * njacX[3][4][i+1][j][k];
lhsX[4][0][CC][i][j][k] = temp2 * fjacX[4][0][i+1][j][k]
- temp1 * njacX[4][0][i+1][j][k];
lhsX[4][1][CC][i][j][k] = temp2 * fjacX[4][1][i+1][j][k]
- temp1 * njacX[4][1][i+1][j][k];
lhsX[4][2][CC][i][j][k] = temp2 * fjacX[4][2][i+1][j][k]
- temp1 * njacX[4][2][i+1][j][k];
lhsX[4][3][CC][i][j][k] = temp2 * fjacX[4][3][i+1][j][k]
- temp1 * njacX[4][3][i+1][j][k];
lhsX[4][4][CC][i][j][k] = temp2 * fjacX[4][4][i+1][j][k]
- temp1 * njacX[4][4][i+1][j][k]
- temp1 * dx5;
}
}
}
#endif
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs Gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(IMAX) and rhs'(IMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][j][0] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
//binvcrhs( lhsX[0][j][BB], lhsX[k][0][j][k][CC], rhs[k][j][0] );
size_t kernel_x_solve_4_off[2] = { 1, 1 };
size_t kernel_x_solve_4_idx[2] = { gp22, gp12 };
brisbane_kernel kernel_x_solve_4;
brisbane_kernel_create("x_solve_4", &kernel_x_solve_4);
brisbane_kernel_setmem(kernel_x_solve_4, 0, mem_lhsX, brisbane_rw);
brisbane_kernel_setmem(kernel_x_solve_4, 1, mem_rhs, brisbane_rw);
brisbane_task task4;
brisbane_task_create(&task4);
brisbane_task_kernel(task4, kernel_x_solve_4, 2, kernel_x_solve_4_off, kernel_x_solve_4_idx);
brisbane_task_submit(task4, brisbane_cpu, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(j,k,pivot, coeff)
#else
#pragma omp target teams distribute parallel for simd private(pivot, coeff) collapse(2)
#endif
for (j = 1; j <= gp12; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(pivot, coeff)
#endif
for (k = 1; k <= gp22; k++) {
/*
for(m = 0; m < 5; m++){
pivot = 1.00/lhsX[m][m][BB][0][j][k];
for(n = m+1; n < 5; n++){
lhsX[m][n][BB][0][j][k] = lhsX[m][n][BB][0][j][k]*pivot;
}
lhsX[m][0][CC][0][j][k] = lhsX[m][0][CC][0][j][k]*pivot;
lhsX[m][1][CC][0][j][k] = lhsX[m][1][CC][0][j][k]*pivot;
lhsX[m][2][CC][0][j][k] = lhsX[m][2][CC][0][j][k]*pivot;
lhsX[m][3][CC][0][j][k] = lhsX[m][3][CC][0][j][k]*pivot;
lhsX[m][4][CC][0][j][k] = lhsX[m][4][CC][0][j][k]*pivot;
rhs[k][j][0][m] = rhs[k][j][0][m]*pivot;
for(n = 0; n < 5; n++){
if(n != m){
coeff = lhsX[n][m][BB][0][j][k];
for(z = m+1; z < 5; z++){
lhsX[n][z][BB][0][j][k] = lhsX[n][z][BB][0][j][k] - coeff*lhsX[m][z][BB][0][j][k];
}
lhsX[n][0][CC][0][j][k] = lhsX[n][0][CC][0][j][k] - coeff*lhsX[m][0][CC][0][j][k];
lhsX[n][1][CC][0][j][k] = lhsX[n][1][CC][0][j][k] - coeff*lhsX[m][1][CC][0][j][k];
lhsX[n][2][CC][0][j][k] = lhsX[n][2][CC][0][j][k] - coeff*lhsX[m][2][CC][0][j][k];
lhsX[n][3][CC][0][j][k] = lhsX[n][3][CC][0][j][k] - coeff*lhsX[m][3][CC][0][j][k];
lhsX[n][4][CC][0][j][k] = lhsX[n][4][CC][0][j][k] - coeff*lhsX[m][4][CC][0][j][k];
rhs[k][j][0][n] = rhs[k][j][0][n] - coeff*rhs[k][j][0][m];
}
}
}
*/
pivot = 1.00/lhsX[0][0][BB][0][j][k];
lhsX[0][1][BB][0][j][k] = lhsX[0][1][BB][0][j][k]*pivot;
lhsX[0][2][BB][0][j][k] = lhsX[0][2][BB][0][j][k]*pivot;
lhsX[0][3][BB][0][j][k] = lhsX[0][3][BB][0][j][k]*pivot;
lhsX[0][4][BB][0][j][k] = lhsX[0][4][BB][0][j][k]*pivot;
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k]*pivot;
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k]*pivot;
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k]*pivot;
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k]*pivot;
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k]*pivot;
rhs[k][j][0][0] = rhs[k][j][0][0] *pivot;
coeff = lhsX[1][0][BB][0][j][k];
lhsX[1][1][BB][0][j][k]= lhsX[1][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k];
lhsX[1][2][BB][0][j][k]= lhsX[1][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k];
lhsX[1][3][BB][0][j][k]= lhsX[1][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k];
lhsX[1][4][BB][0][j][k]= lhsX[1][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k];
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k];
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k];
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k];
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k];
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k];
rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][0];
coeff = lhsX[2][0][BB][0][j][k];
lhsX[2][1][BB][0][j][k]= lhsX[2][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k];
lhsX[2][2][BB][0][j][k]= lhsX[2][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k];
lhsX[2][3][BB][0][j][k]= lhsX[2][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k];
lhsX[2][4][BB][0][j][k]= lhsX[2][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k];
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k];
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k];
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k];
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k];
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k];
rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][0];
coeff = lhsX[3][0][BB][0][j][k];
lhsX[3][1][BB][0][j][k]= lhsX[3][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k];
lhsX[3][2][BB][0][j][k]= lhsX[3][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k];
lhsX[3][3][BB][0][j][k]= lhsX[3][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k];
lhsX[3][4][BB][0][j][k]= lhsX[3][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k];
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k];
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k];
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k];
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k];
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k];
rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][0];
coeff = lhsX[4][0][BB][0][j][k];
lhsX[4][1][BB][0][j][k]= lhsX[4][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k];
lhsX[4][2][BB][0][j][k]= lhsX[4][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k];
lhsX[4][3][BB][0][j][k]= lhsX[4][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k];
lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k];
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k];
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k];
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k];
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k];
rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][0];
pivot = 1.00/lhsX[1][1][BB][0][j][k];
lhsX[1][2][BB][0][j][k] = lhsX[1][2][BB][0][j][k]*pivot;
lhsX[1][3][BB][0][j][k] = lhsX[1][3][BB][0][j][k]*pivot;
lhsX[1][4][BB][0][j][k] = lhsX[1][4][BB][0][j][k]*pivot;
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k]*pivot;
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k]*pivot;
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k]*pivot;
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k]*pivot;
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k]*pivot;
rhs[k][j][0][1] = rhs[k][j][0][1] *pivot;
coeff = lhsX[0][1][BB][0][j][k];
lhsX[0][2][BB][0][j][k]= lhsX[0][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k];
lhsX[0][3][BB][0][j][k]= lhsX[0][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k];
lhsX[0][4][BB][0][j][k]= lhsX[0][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k];
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k];
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k];
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k];
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k];
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k];
rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][1];
coeff = lhsX[2][1][BB][0][j][k];
lhsX[2][2][BB][0][j][k]= lhsX[2][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k];
lhsX[2][3][BB][0][j][k]= lhsX[2][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k];
lhsX[2][4][BB][0][j][k]= lhsX[2][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k];
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k];
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k];
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k];
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k];
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k];
rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][1];
coeff = lhsX[3][1][BB][0][j][k];
lhsX[3][2][BB][0][j][k]= lhsX[3][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k];
lhsX[3][3][BB][0][j][k]= lhsX[3][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k];
lhsX[3][4][BB][0][j][k]= lhsX[3][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k];
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k];
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k];
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k];
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k];
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k];
rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][1];
coeff = lhsX[4][1][BB][0][j][k];
lhsX[4][2][BB][0][j][k]= lhsX[4][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k];
lhsX[4][3][BB][0][j][k]= lhsX[4][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k];
lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k];
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k];
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k];
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k];
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k];
rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][1];
pivot = 1.00/lhsX[2][2][BB][0][j][k];
lhsX[2][3][BB][0][j][k] = lhsX[2][3][BB][0][j][k]*pivot;
lhsX[2][4][BB][0][j][k] = lhsX[2][4][BB][0][j][k]*pivot;
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k]*pivot;
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k]*pivot;
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k]*pivot;
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k]*pivot;
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k]*pivot;
rhs[k][j][0][2] = rhs[k][j][0][2] *pivot;
coeff = lhsX[0][2][BB][0][j][k];
lhsX[0][3][BB][0][j][k]= lhsX[0][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k];
lhsX[0][4][BB][0][j][k]= lhsX[0][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k];
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k];
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k];
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k];
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k];
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k];
rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][2];
coeff = lhsX[1][2][BB][0][j][k];
lhsX[1][3][BB][0][j][k]= lhsX[1][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k];
lhsX[1][4][BB][0][j][k]= lhsX[1][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k];
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k];
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k];
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k];
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k];
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k];
rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][2];
coeff = lhsX[3][2][BB][0][j][k];
lhsX[3][3][BB][0][j][k]= lhsX[3][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k];
lhsX[3][4][BB][0][j][k]= lhsX[3][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k];
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k];
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k];
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k];
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k];
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k];
rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][2];
coeff = lhsX[4][2][BB][0][j][k];
lhsX[4][3][BB][0][j][k]= lhsX[4][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k];
lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k];
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k];
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k];
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k];
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k];
rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][2];
pivot = 1.00/lhsX[3][3][BB][0][j][k];
lhsX[3][4][BB][0][j][k] = lhsX[3][4][BB][0][j][k]*pivot;
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k]*pivot;
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k]*pivot;
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k]*pivot;
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k]*pivot;
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k]*pivot;
rhs[k][j][0][3] = rhs[k][j][0][3] *pivot;
coeff = lhsX[0][3][BB][0][j][k];
lhsX[0][4][BB][0][j][k]= lhsX[0][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k];
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k];
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k];
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k];
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k];
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k];
rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][3];
coeff = lhsX[1][3][BB][0][j][k];
lhsX[1][4][BB][0][j][k]= lhsX[1][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k];
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k];
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k];
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k];
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k];
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k];
rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][3];
coeff = lhsX[2][3][BB][0][j][k];
lhsX[2][4][BB][0][j][k]= lhsX[2][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k];
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k];
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k];
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k];
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k];
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k];
rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][3];
coeff = lhsX[4][3][BB][0][j][k];
lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k];
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k];
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k];
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k];
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k];
rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][3];
pivot = 1.00/lhsX[4][4][BB][0][j][k];
lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k]*pivot;
lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k]*pivot;
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k]*pivot;
lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k]*pivot;
lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k]*pivot;
rhs[k][j][0][4] = rhs[k][j][0][4] *pivot;
coeff = lhsX[0][4][BB][0][j][k];
lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k];
lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k];
lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k];
lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k];
lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k];
rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][4];
coeff = lhsX[1][4][BB][0][j][k];
lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k];
lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k];
lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k];
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k];
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k];
rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][4];
coeff = lhsX[2][4][BB][0][j][k];
lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k];
lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k];
lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k];
lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k];
lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k];
rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][4];
coeff = lhsX[3][4][BB][0][j][k];
lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k];
lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k];
lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k];
lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k];
lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k];
rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][4];
}/*end j*/
}/*end k*/
#endif
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
size_t kernel_x_solve_5_off[1] = { 1 };
size_t kernel_x_solve_5_idx[1] = { gp12 };
brisbane_kernel kernel_x_solve_5;
brisbane_kernel_create("x_solve_5", &kernel_x_solve_5);
brisbane_kernel_setmem(kernel_x_solve_5, 0, mem_lhsX, brisbane_rw);
brisbane_kernel_setmem(kernel_x_solve_5, 1, mem_rhs, brisbane_rw);
brisbane_kernel_setarg(kernel_x_solve_5, 2, sizeof(int), &isize);
brisbane_kernel_setarg(kernel_x_solve_5, 3, sizeof(int), &gp22);
brisbane_task task5;
brisbane_task_create(&task5);
brisbane_task_kernel(task5, kernel_x_solve_5, 1, kernel_x_solve_5_off, kernel_x_solve_5_idx);
brisbane_task_submit(task5, brisbane_cpu, NULL, true);
#if 0
#pragma omp target teams distribute parallel for private(i,k)
for (j = 1; j <= gp12; j++) {
for (i = 1; i <= isize-1; i++) {
#pragma omp simd private(pivot,coeff)
for (k = 1; k <= gp22; k++) {
//-------------------------------------------------------------------
// rhs(i) = rhs(i) - A*rhs(i-1)
//-------------------------------------------------------------------
//matvec_sub(lhsX[i-1][j][AA], rhs[k][i][j][k], rhs[k][j][i]);
/*
for(m = 0; m < 5; m++){
rhs[k][j][i][m] = rhs[k][j][i][m] - lhsX[m][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[m][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[m][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[m][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[m][4][AA][i][j][k]*rhs[k][j][i-1][4];
}
*/
rhs[k][j][i][0] = rhs[k][j][i][0] - lhsX[0][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[0][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[0][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[0][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[0][4][AA][i][j][k]*rhs[k][j][i-1][4];
rhs[k][j][i][1] = rhs[k][j][i][1] - lhsX[1][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[1][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[1][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[1][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[1][4][AA][i][j][k]*rhs[k][j][i-1][4];
rhs[k][j][i][2] = rhs[k][j][i][2] - lhsX[2][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[2][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[2][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[2][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[2][4][AA][i][j][k]*rhs[k][j][i-1][4];
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsX[3][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[3][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[3][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[3][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[3][4][AA][i][j][k]*rhs[k][j][i-1][4];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsX[4][0][AA][i][j][k]*rhs[k][j][i-1][0]
- lhsX[4][1][AA][i][j][k]*rhs[k][j][i-1][1]
- lhsX[4][2][AA][i][j][k]*rhs[k][j][i-1][2]
- lhsX[4][3][AA][i][j][k]*rhs[k][j][i-1][3]
- lhsX[4][4][AA][i][j][k]*rhs[k][j][i-1][4];
//-------------------------------------------------------------------
// B(i) = B(i) - C(i-1)*A(i)
//-------------------------------------------------------------------
// matmul_sub(lhsX[i-1][j][AA], lhsX[k][i][j][k][CC], lhsX[k][j][i][BB]);
/*
for(m = 0; m < 5; m++){
for(n = 0; n < 5; n++){
lhsX[n][m][BB][i][j][k] = lhsX[n][m][BB][i][j][k] - lhsX[n][0][AA][i][j][k]*lhsX[0][m][CC][i-1][j][k]
- lhsX[n][1][AA][i][j][k]*lhsX[1][m][CC][i-1][j][k]
- lhsX[n][2][AA][i][j][k]*lhsX[2][m][CC][i-1][j][k]
- lhsX[n][3][AA][i][j][k]*lhsX[3][m][CC][i-1][j][k]
- lhsX[n][4][AA][i][j][k]*lhsX[4][m][CC][i-1][j][k];
}
}
*/
lhsX[0][0][BB][i][j][k] = lhsX[0][0][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[1][0][BB][i][j][k] = lhsX[1][0][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[2][0][BB][i][j][k] = lhsX[2][0][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[3][0][BB][i][j][k] = lhsX[3][0][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[4][0][BB][i][j][k] = lhsX[4][0][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k];
lhsX[0][1][BB][i][j][k] = lhsX[0][1][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[1][1][BB][i][j][k] = lhsX[1][1][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[2][1][BB][i][j][k] = lhsX[2][1][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[3][1][BB][i][j][k] = lhsX[3][1][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[4][1][BB][i][j][k] = lhsX[4][1][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k];
lhsX[0][2][BB][i][j][k] = lhsX[0][2][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[1][2][BB][i][j][k] = lhsX[1][2][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[2][2][BB][i][j][k] = lhsX[2][2][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[3][2][BB][i][j][k] = lhsX[3][2][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[4][2][BB][i][j][k] = lhsX[4][2][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k];
lhsX[0][3][BB][i][j][k] = lhsX[0][3][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[1][3][BB][i][j][k] = lhsX[1][3][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[2][3][BB][i][j][k] = lhsX[2][3][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[3][3][BB][i][j][k] = lhsX[3][3][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[4][3][BB][i][j][k] = lhsX[4][3][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k];
lhsX[0][4][BB][i][j][k] = lhsX[0][4][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[0][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[0][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[0][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[0][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
lhsX[1][4][BB][i][j][k] = lhsX[1][4][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[1][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[1][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[1][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[1][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
lhsX[2][4][BB][i][j][k] = lhsX[2][4][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[2][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[2][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[2][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[2][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
lhsX[3][4][BB][i][j][k] = lhsX[3][4][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[3][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[3][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[3][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[3][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
lhsX[4][4][BB][i][j][k] = lhsX[4][4][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k]
- lhsX[4][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k]
- lhsX[4][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k]
- lhsX[4][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k]
- lhsX[4][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k];
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][j][i] by b_inverse[k][j][i] and copy to rhs
//-------------------------------------------------------------------
//binvcrhs( lhsX[*][*][BB][i][j][k], lhsX[*][*][CC][i][j][k], rhs[k][j][i] );
/*
for(m = 0; m < 5; m++){
pivot = 1.00/lhsX[m][m][BB][i][j][k];
for(n = m+1; n < 5; n++){
lhsX[m][n][BB][i][j][k] = lhsX[m][n][BB][i][j][k]*pivot;
}
lhsX[m][0][CC][i][j][k] = lhsX[m][0][CC][i][j][k]*pivot;
lhsX[m][1][CC][i][j][k] = lhsX[m][1][CC][i][j][k]*pivot;
lhsX[m][2][CC][i][j][k] = lhsX[m][2][CC][i][j][k]*pivot;
lhsX[m][3][CC][i][j][k] = lhsX[m][3][CC][i][j][k]*pivot;
lhsX[m][4][CC][i][j][k] = lhsX[m][4][CC][i][j][k]*pivot;
rhs[k][j][i][m] = rhs[k][j][i][m]*pivot;
for(n = 0; n < 5; n++){
if(n != m){
coeff = lhsX[n][m][BB][i][j][k];
for(z = m+1; z < 5; z++){
lhsX[n][z][BB][i][j][k] = lhsX[n][z][BB][i][j][k] - coeff*lhsX[m][z][BB][i][j][k];
}
lhsX[n][0][CC][i][j][k] = lhsX[n][0][CC][i][j][k] - coeff*lhsX[m][0][CC][i][j][k];
lhsX[n][1][CC][i][j][k] = lhsX[n][1][CC][i][j][k] - coeff*lhsX[m][1][CC][i][j][k];
lhsX[n][2][CC][i][j][k] = lhsX[n][2][CC][i][j][k] - coeff*lhsX[m][2][CC][i][j][k];
lhsX[n][3][CC][i][j][k] = lhsX[n][3][CC][i][j][k] - coeff*lhsX[m][3][CC][i][j][k];
lhsX[n][4][CC][i][j][k] = lhsX[n][4][CC][i][j][k] - coeff*lhsX[m][4][CC][i][j][k];
rhs[k][j][i][n] = rhs[k][j][i][n] - coeff*rhs[k][j][i][m];
}
}
}
*/
pivot = 1.00/lhsX[0][0][BB][i][j][k];
lhsX[0][1][BB][i][j][k] = lhsX[0][1][BB][i][j][k]*pivot;
lhsX[0][2][BB][i][j][k] = lhsX[0][2][BB][i][j][k]*pivot;
lhsX[0][3][BB][i][j][k] = lhsX[0][3][BB][i][j][k]*pivot;
lhsX[0][4][BB][i][j][k] = lhsX[0][4][BB][i][j][k]*pivot;
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k]*pivot;
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k]*pivot;
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k]*pivot;
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k]*pivot;
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k]*pivot;
rhs[k][j][i][0] = rhs[k][j][i][0] *pivot;
coeff = lhsX[1][0][BB][i][j][k];
lhsX[1][1][BB][i][j][k]= lhsX[1][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k];
lhsX[1][2][BB][i][j][k]= lhsX[1][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k];
lhsX[1][3][BB][i][j][k]= lhsX[1][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k];
lhsX[1][4][BB][i][j][k]= lhsX[1][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k];
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k];
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k];
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k];
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k];
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][0];
coeff = lhsX[2][0][BB][i][j][k];
lhsX[2][1][BB][i][j][k]= lhsX[2][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k];
lhsX[2][2][BB][i][j][k]= lhsX[2][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k];
lhsX[2][3][BB][i][j][k]= lhsX[2][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k];
lhsX[2][4][BB][i][j][k]= lhsX[2][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k];
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k];
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k];
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k];
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k];
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][0];
coeff = lhsX[3][0][BB][i][j][k];
lhsX[3][1][BB][i][j][k]= lhsX[3][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k];
lhsX[3][2][BB][i][j][k]= lhsX[3][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k];
lhsX[3][3][BB][i][j][k]= lhsX[3][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k];
lhsX[3][4][BB][i][j][k]= lhsX[3][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k];
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k];
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k];
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k];
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k];
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][0];
coeff = lhsX[4][0][BB][i][j][k];
lhsX[4][1][BB][i][j][k]= lhsX[4][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k];
lhsX[4][2][BB][i][j][k]= lhsX[4][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k];
lhsX[4][3][BB][i][j][k]= lhsX[4][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k];
lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k];
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k];
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k];
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k];
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][0];
pivot = 1.00/lhsX[1][1][BB][i][j][k];
lhsX[1][2][BB][i][j][k] = lhsX[1][2][BB][i][j][k]*pivot;
lhsX[1][3][BB][i][j][k] = lhsX[1][3][BB][i][j][k]*pivot;
lhsX[1][4][BB][i][j][k] = lhsX[1][4][BB][i][j][k]*pivot;
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k]*pivot;
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k]*pivot;
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k]*pivot;
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k]*pivot;
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k]*pivot;
rhs[k][j][i][1] = rhs[k][j][i][1] *pivot;
coeff = lhsX[0][1][BB][i][j][k];
lhsX[0][2][BB][i][j][k]= lhsX[0][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k];
lhsX[0][3][BB][i][j][k]= lhsX[0][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k];
lhsX[0][4][BB][i][j][k]= lhsX[0][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k];
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k];
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k];
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k];
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k];
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][1];
coeff = lhsX[2][1][BB][i][j][k];
lhsX[2][2][BB][i][j][k]= lhsX[2][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k];
lhsX[2][3][BB][i][j][k]= lhsX[2][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k];
lhsX[2][4][BB][i][j][k]= lhsX[2][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k];
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k];
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k];
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k];
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k];
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][1];
coeff = lhsX[3][1][BB][i][j][k];
lhsX[3][2][BB][i][j][k]= lhsX[3][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k];
lhsX[3][3][BB][i][j][k]= lhsX[3][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k];
lhsX[3][4][BB][i][j][k]= lhsX[3][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k];
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k];
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k];
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k];
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k];
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][1];
coeff = lhsX[4][1][BB][i][j][k];
lhsX[4][2][BB][i][j][k]= lhsX[4][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k];
lhsX[4][3][BB][i][j][k]= lhsX[4][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k];
lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k];
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k];
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k];
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k];
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][1];
pivot = 1.00/lhsX[2][2][BB][i][j][k];
lhsX[2][3][BB][i][j][k] = lhsX[2][3][BB][i][j][k]*pivot;
lhsX[2][4][BB][i][j][k] = lhsX[2][4][BB][i][j][k]*pivot;
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k]*pivot;
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k]*pivot;
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k]*pivot;
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k]*pivot;
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k]*pivot;
rhs[k][j][i][2] = rhs[k][j][i][2] *pivot;
coeff = lhsX[0][2][BB][i][j][k];
lhsX[0][3][BB][i][j][k]= lhsX[0][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k];
lhsX[0][4][BB][i][j][k]= lhsX[0][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k];
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k];
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k];
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k];
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k];
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][2];
coeff = lhsX[1][2][BB][i][j][k];
lhsX[1][3][BB][i][j][k]= lhsX[1][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k];
lhsX[1][4][BB][i][j][k]= lhsX[1][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k];
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k];
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k];
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k];
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k];
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][2];
coeff = lhsX[3][2][BB][i][j][k];
lhsX[3][3][BB][i][j][k]= lhsX[3][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k];
lhsX[3][4][BB][i][j][k]= lhsX[3][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k];
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k];
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k];
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k];
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k];
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][2];
coeff = lhsX[4][2][BB][i][j][k];
lhsX[4][3][BB][i][j][k]= lhsX[4][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k];
lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k];
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k];
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k];
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k];
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][2];
pivot = 1.00/lhsX[3][3][BB][i][j][k];
lhsX[3][4][BB][i][j][k] = lhsX[3][4][BB][i][j][k]*pivot;
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k]*pivot;
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k]*pivot;
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k]*pivot;
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k]*pivot;
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k]*pivot;
rhs[k][j][i][3] = rhs[k][j][i][3] *pivot;
coeff = lhsX[0][3][BB][i][j][k];
lhsX[0][4][BB][i][j][k]= lhsX[0][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k];
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k];
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k];
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k];
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k];
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][3];
coeff = lhsX[1][3][BB][i][j][k];
lhsX[1][4][BB][i][j][k]= lhsX[1][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k];
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k];
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k];
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k];
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k];
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][3];
coeff = lhsX[2][3][BB][i][j][k];
lhsX[2][4][BB][i][j][k]= lhsX[2][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k];
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k];
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k];
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k];
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k];
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][3];
coeff = lhsX[4][3][BB][i][j][k];
lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k];
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k];
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k];
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k];
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k];
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][3];
pivot = 1.00/lhsX[4][4][BB][i][j][k];
lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k]*pivot;
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k]*pivot;
lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k]*pivot;
lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k]*pivot;
lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k]*pivot;
rhs[k][j][i][4] = rhs[k][j][i][4] *pivot;
coeff = lhsX[0][4][BB][i][j][k];
lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k];
lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k];
lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k];
lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k];
lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k];
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][4];
coeff = lhsX[1][4][BB][i][j][k];
lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k];
lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k];
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k];
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k];
lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k];
rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][4];
coeff = lhsX[2][4][BB][i][j][k];
lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k];
lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k];
lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k];
lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k];
lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k];
rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][4];
coeff = lhsX[3][4][BB][i][j][k];
lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k];
lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k];
lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k];
lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k];
lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k];
rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][4];
}/*end i*/
}
}
#endif
//---------------------------------------------------------------------
// rhs(isize) = rhs(isize) - A*rhs(isize-1)
//---------------------------------------------------------------------
//matvec_sub(lhsX[*][*][AA][isize][j][k], rhs[k][j][isize-1], rhs[k][j][isize]);
// Launch "x_solve_6": the boundary matvec_sub step (rhs(isize) -= A*rhs(isize-1))
// over a 2-D index space of gp12 x gp22 points, offset by (1,1) to skip the
// boundary indices — mirrors the disabled OpenMP j/k loop below (#if 0).
size_t kernel_x_solve_6_off[2] = { 1, 1 };
size_t kernel_x_solve_6_idx[2] = { gp12, gp22 };
brisbane_kernel kernel_x_solve_6;
brisbane_kernel_create("x_solve_6", &kernel_x_solve_6);
// Both buffers are read-modify-write: the kernel reads lhsX[*][*][AA] and
// updates rhs in place.
brisbane_kernel_setmem(kernel_x_solve_6, 0, mem_lhsX, brisbane_rw);
brisbane_kernel_setmem(kernel_x_solve_6, 1, mem_rhs, brisbane_rw);
brisbane_kernel_setarg(kernel_x_solve_6, 2, sizeof(int), &isize);
brisbane_task task6;
brisbane_task_create(&task6);
brisbane_task_kernel(task6, kernel_x_solve_6, 2, kernel_x_solve_6_off, kernel_x_solve_6_idx);
// Final 'true' presumably requests a blocking (synchronous) submit — TODO
// confirm against the brisbane/IRIS API. NOTE(review): task6 / kernel_x_solve_6
// are not released in this chunk; verify a matching release exists elsewhere.
brisbane_task_submit(task6, brisbane_cpu, NULL, true);
#if 0
#pragma omp target teams distribute parallel for collapse(2) private(k,j)
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
/*
for(m = 0; m < 5; m++){
rhs[k][j][isize][m] = rhs[k][j][isize][m] - lhsX[m][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[m][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[m][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[m][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[m][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
}
*/
rhs[k][j][isize][0] = rhs[k][j][isize][0] - lhsX[0][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[0][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[0][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[0][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[0][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - lhsX[1][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[1][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[1][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[1][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[1][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - lhsX[2][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[2][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[2][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[2][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[2][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - lhsX[3][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[3][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[3][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[3][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[3][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - lhsX[4][0][AA][isize][j][k]*rhs[k][j][isize-1][0]
- lhsX[4][1][AA][isize][j][k]*rhs[k][j][isize-1][1]
- lhsX[4][2][AA][isize][j][k]*rhs[k][j][isize-1][2]
- lhsX[4][3][AA][isize][j][k]*rhs[k][j][isize-1][3]
- lhsX[4][4][AA][isize][j][k]*rhs[k][j][isize-1][4];
}
}
#endif
//---------------------------------------------------------------------
// B(isize) = B(isize) - C(isize-1)*A(isize)
//---------------------------------------------------------------------
//matmul_sub(lhsX[*][*][AA][isize][j][k], lhsX[*][*][CC][isize-1][j][k], lhsX[*][*][BB][isize][j][k]);
// Launch "x_solve_7": the boundary matmul_sub step
// (B(isize) -= A(isize)*C(isize-1)) over the same 2-D (gp12 x gp22) index
// space as task6 — mirrors the disabled OpenMP j/k loop below (#if 0).
size_t kernel_x_solve_7_off[2] = { 1, 1 };
size_t kernel_x_solve_7_idx[2] = { gp12, gp22 };
brisbane_kernel kernel_x_solve_7;
brisbane_kernel_create("x_solve_7", &kernel_x_solve_7);
// Only lhsX is bound: this step reads the AA/CC blocks and updates the BB
// blocks in place; rhs is untouched (see the reference loop below).
brisbane_kernel_setmem(kernel_x_solve_7, 0, mem_lhsX, brisbane_rw);
brisbane_kernel_setarg(kernel_x_solve_7, 1, sizeof(int), &isize);
brisbane_task task7;
brisbane_task_create(&task7);
brisbane_task_kernel(task7, kernel_x_solve_7, 2, kernel_x_solve_7_off, kernel_x_solve_7_idx);
// Final 'true' presumably requests a blocking submit — TODO confirm.
brisbane_task_submit(task7, brisbane_cpu, NULL, true);
#if 0
#pragma omp target teams distribute parallel for collapse(2) private(k,j)
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
/*
for(m = 0; m < 5; m++){
for(n = 0; n < 5; n++){
lhsX[n][m][BB][isize][j][k] = lhsX[n][m][BB][isize][j][k] - lhsX[n][0][AA][isize][j][k]*lhsX[0][m][CC][isize-1][j][k]
- lhsX[n][1][AA][isize][j][k]*lhsX[1][m][CC][isize-1][j][k]
- lhsX[n][2][AA][isize][j][k]*lhsX[2][m][CC][isize-1][j][k]
- lhsX[n][3][AA][isize][j][k]*lhsX[3][m][CC][isize-1][j][k]
- lhsX[n][4][AA][isize][j][k]*lhsX[4][m][CC][isize-1][j][k];
}
}
*/
lhsX[0][0][BB][isize][j][k] = lhsX[0][0][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[1][0][BB][isize][j][k] = lhsX[1][0][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[2][0][BB][isize][j][k] = lhsX[2][0][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[3][0][BB][isize][j][k] = lhsX[3][0][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[4][0][BB][isize][j][k] = lhsX[4][0][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k];
lhsX[0][1][BB][isize][j][k] = lhsX[0][1][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[1][1][BB][isize][j][k] = lhsX[1][1][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[2][1][BB][isize][j][k] = lhsX[2][1][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[3][1][BB][isize][j][k] = lhsX[3][1][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[4][1][BB][isize][j][k] = lhsX[4][1][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k];
lhsX[0][2][BB][isize][j][k] = lhsX[0][2][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[1][2][BB][isize][j][k] = lhsX[1][2][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[2][2][BB][isize][j][k] = lhsX[2][2][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[3][2][BB][isize][j][k] = lhsX[3][2][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[4][2][BB][isize][j][k] = lhsX[4][2][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k];
lhsX[0][3][BB][isize][j][k] = lhsX[0][3][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[1][3][BB][isize][j][k] = lhsX[1][3][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[2][3][BB][isize][j][k] = lhsX[2][3][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[3][3][BB][isize][j][k] = lhsX[3][3][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[4][3][BB][isize][j][k] = lhsX[4][3][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k];
lhsX[0][4][BB][isize][j][k] = lhsX[0][4][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[0][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[0][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[0][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[0][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
lhsX[1][4][BB][isize][j][k] = lhsX[1][4][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[1][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[1][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[1][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[1][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
lhsX[2][4][BB][isize][j][k] = lhsX[2][4][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[2][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[2][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[2][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[2][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
lhsX[3][4][BB][isize][j][k] = lhsX[3][4][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[3][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[3][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[3][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[3][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
lhsX[4][4][BB][isize][j][k] = lhsX[4][4][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k]
- lhsX[4][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k]
- lhsX[4][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k]
- lhsX[4][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k]
- lhsX[4][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k];
}
}
#endif
//---------------------------------------------------------------------
// multiply rhs() by b_inverse() and copy to rhs
//---------------------------------------------------------------------
//binvrhs( lhsX[*][*][BB][isize][j][k], rhs[k][j][isize] );
// Launch "x_solve_8": the boundary binvrhs step (multiply rhs(isize) by
// B(isize)^-1 in place) — mirrors the disabled OpenMP loop below (#if 0).
// NOTE(review): unlike tasks 6/7 this uses a 1-D index space of gp22 points
// (k only); presumably the j loop (1..gp12) runs inside the "x_solve_8"
// kernel body — confirm against the kernel source.
size_t kernel_x_solve_8_off[1] = { 1 };
size_t kernel_x_solve_8_idx[1] = { gp22 };
brisbane_kernel kernel_x_solve_8;
brisbane_kernel_create("x_solve_8", &kernel_x_solve_8);
// Both buffers read-modify-write: the elimination scales/updates the BB
// blocks of lhsX and rhs together (see the reference loop below).
brisbane_kernel_setmem(kernel_x_solve_8, 0, mem_lhsX, brisbane_rw);
brisbane_kernel_setmem(kernel_x_solve_8, 1, mem_rhs, brisbane_rw);
brisbane_kernel_setarg(kernel_x_solve_8, 2, sizeof(int), &isize);
brisbane_task task8;
brisbane_task_create(&task8);
brisbane_task_kernel(task8, kernel_x_solve_8, 1, kernel_x_solve_8_off, kernel_x_solve_8_idx);
// Final 'true' presumably requests a blocking submit — TODO confirm.
brisbane_task_submit(task8, brisbane_cpu, NULL, true);
#if 0
#pragma omp target teams distribute parallel for private(j,k,pivot,coeff)
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
/*
for(m = 0; m < 5; m++){
pivot = 1.00/lhsX[m][m][BB][isize][j][k];
for(n = m+1; n < 5; n++){
lhsX[m][n][BB][isize][j][k] = lhsX[m][n][BB][isize][j][k]*pivot;
}
rhs[k][j][isize][m] = rhs[k][j][isize][m]*pivot;
for(n = 0; n < 5; n++){
if(n != m){
coeff = lhsX[n][m][BB][isize][j][k];
for(z = m+1; z < 5; z++){
lhsX[n][z][BB][isize][j][k] = lhsX[n][z][BB][isize][j][k] - coeff*lhsX[m][z][BB][isize][j][k];
}
rhs[k][j][isize][n] = rhs[k][j][isize][n] - coeff*rhs[k][j][isize][m];
}
}
}
*/
pivot = 1.00/lhsX[0][0][BB][isize][j][k];
lhsX[0][1][BB][isize][j][k] = lhsX[0][1][BB][isize][j][k]*pivot;
lhsX[0][2][BB][isize][j][k] = lhsX[0][2][BB][isize][j][k]*pivot;
lhsX[0][3][BB][isize][j][k] = lhsX[0][3][BB][isize][j][k]*pivot;
lhsX[0][4][BB][isize][j][k] = lhsX[0][4][BB][isize][j][k]*pivot;
rhs[k][j][isize][0] = rhs[k][j][isize][0] *pivot;
coeff = lhsX[1][0][BB][isize][j][k];
lhsX[1][1][BB][isize][j][k]= lhsX[1][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k];
lhsX[1][2][BB][isize][j][k]= lhsX[1][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k];
lhsX[1][3][BB][isize][j][k]= lhsX[1][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k];
lhsX[1][4][BB][isize][j][k]= lhsX[1][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][0];
coeff = lhsX[2][0][BB][isize][j][k];
lhsX[2][1][BB][isize][j][k]= lhsX[2][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k];
lhsX[2][2][BB][isize][j][k]= lhsX[2][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k];
lhsX[2][3][BB][isize][j][k]= lhsX[2][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k];
lhsX[2][4][BB][isize][j][k]= lhsX[2][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][0];
coeff = lhsX[3][0][BB][isize][j][k];
lhsX[3][1][BB][isize][j][k]= lhsX[3][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k];
lhsX[3][2][BB][isize][j][k]= lhsX[3][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k];
lhsX[3][3][BB][isize][j][k]= lhsX[3][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k];
lhsX[3][4][BB][isize][j][k]= lhsX[3][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][0];
coeff = lhsX[4][0][BB][isize][j][k];
lhsX[4][1][BB][isize][j][k]= lhsX[4][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k];
lhsX[4][2][BB][isize][j][k]= lhsX[4][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k];
lhsX[4][3][BB][isize][j][k]= lhsX[4][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k];
lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][0];
pivot = 1.00/lhsX[1][1][BB][isize][j][k];
lhsX[1][2][BB][isize][j][k] = lhsX[1][2][BB][isize][j][k]*pivot;
lhsX[1][3][BB][isize][j][k] = lhsX[1][3][BB][isize][j][k]*pivot;
lhsX[1][4][BB][isize][j][k] = lhsX[1][4][BB][isize][j][k]*pivot;
rhs[k][j][isize][1] = rhs[k][j][isize][1] *pivot;
coeff = lhsX[0][1][BB][isize][j][k];
lhsX[0][2][BB][isize][j][k]= lhsX[0][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k];
lhsX[0][3][BB][isize][j][k]= lhsX[0][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k];
lhsX[0][4][BB][isize][j][k]= lhsX[0][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][1];
coeff = lhsX[2][1][BB][isize][j][k];
lhsX[2][2][BB][isize][j][k]= lhsX[2][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k];
lhsX[2][3][BB][isize][j][k]= lhsX[2][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k];
lhsX[2][4][BB][isize][j][k]= lhsX[2][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][1];
coeff = lhsX[3][1][BB][isize][j][k];
lhsX[3][2][BB][isize][j][k]= lhsX[3][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k];
lhsX[3][3][BB][isize][j][k]= lhsX[3][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k];
lhsX[3][4][BB][isize][j][k]= lhsX[3][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][1];
coeff = lhsX[4][1][BB][isize][j][k];
lhsX[4][2][BB][isize][j][k]= lhsX[4][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k];
lhsX[4][3][BB][isize][j][k]= lhsX[4][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k];
lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][1];
pivot = 1.00/lhsX[2][2][BB][isize][j][k];
lhsX[2][3][BB][isize][j][k] = lhsX[2][3][BB][isize][j][k]*pivot;
lhsX[2][4][BB][isize][j][k] = lhsX[2][4][BB][isize][j][k]*pivot;
rhs[k][j][isize][2] = rhs[k][j][isize][2] *pivot;
coeff = lhsX[0][2][BB][isize][j][k];
lhsX[0][3][BB][isize][j][k]= lhsX[0][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k];
lhsX[0][4][BB][isize][j][k]= lhsX[0][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][2];
coeff = lhsX[1][2][BB][isize][j][k];
lhsX[1][3][BB][isize][j][k]= lhsX[1][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k];
lhsX[1][4][BB][isize][j][k]= lhsX[1][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][2];
coeff = lhsX[3][2][BB][isize][j][k];
lhsX[3][3][BB][isize][j][k]= lhsX[3][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k];
lhsX[3][4][BB][isize][j][k]= lhsX[3][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][2];
coeff = lhsX[4][2][BB][isize][j][k];
lhsX[4][3][BB][isize][j][k]= lhsX[4][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k];
lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][2];
pivot = 1.00/lhsX[3][3][BB][isize][j][k];
lhsX[3][4][BB][isize][j][k] = lhsX[3][4][BB][isize][j][k]*pivot;
rhs[k][j][isize][3] = rhs[k][j][isize][3] *pivot;
coeff = lhsX[0][3][BB][isize][j][k];
lhsX[0][4][BB][isize][j][k]= lhsX[0][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][3];
coeff = lhsX[1][3][BB][isize][j][k];
lhsX[1][4][BB][isize][j][k]= lhsX[1][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][3];
coeff = lhsX[2][3][BB][isize][j][k];
lhsX[2][4][BB][isize][j][k]= lhsX[2][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][3];
coeff = lhsX[4][3][BB][isize][j][k];
lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][3];
pivot = 1.00/lhsX[4][4][BB][isize][j][k];
rhs[k][j][isize][4] = rhs[k][j][isize][4] *pivot;
coeff = lhsX[0][4][BB][isize][j][k];
rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][4];
coeff = lhsX[1][4][BB][isize][j][k];
rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][4];
coeff = lhsX[2][4][BB][isize][j][k];
rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][4];
coeff = lhsX[3][4][BB][isize][j][k];
rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][4];
}
}
#endif
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(isize)=rhs(isize)
// else assume U(isize) is loaded in un pack backsub_info
// so just use it
// after u(istart) will be sent to next cell
//---------------------------------------------------------------------
size_t kernel_x_solve_9_off[2] = { 1, 1 };
size_t kernel_x_solve_9_idx[2] = { gp12, gp22 };
brisbane_kernel kernel_x_solve_9;
brisbane_kernel_create("x_solve_9", &kernel_x_solve_9);
brisbane_kernel_setmem(kernel_x_solve_9, 0, mem_lhsX, brisbane_r);
brisbane_kernel_setmem(kernel_x_solve_9, 1, mem_rhs, brisbane_rw);
brisbane_kernel_setarg(kernel_x_solve_9, 2, sizeof(int), &isize);
brisbane_task task9;
brisbane_task_create(&task9);
brisbane_task_kernel(task9, kernel_x_solve_9, 2, kernel_x_solve_9_off, kernel_x_solve_9_idx);
//brisbane_task_submit(task9, brisbane_cpu, NULL, true);
#if 1
brisbane_task task10;
brisbane_task_create(&task10);
brisbane_task_d2h_full(task10, mem_rhs, rhs);
brisbane_task_d2h_full(task10, mem_lhsX, lhsX);
brisbane_task_submit(task10, brisbane_cpu, NULL, true);
#pragma omp target teams distribute parallel for collapse(2) private(i,j,k,m,n)
for (k = 1; k <= gp22; k++) {
for (j = 1; j <= gp12; j++) {
for (i = isize-1; i >=0; i--) {
for (m = 0; m < BLOCK_SIZE; m++) {
for (n = 0; n < BLOCK_SIZE; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhsX[m][n][CC][i][j][k]*rhs[k][j][i+1][n];
}
}
}
}
}
brisbane_task task11;
brisbane_task_create(&task11);
brisbane_task_h2d_full(task11, mem_rhs, rhs);
brisbane_task_submit(task11, brisbane_cpu, NULL, true);
#endif
}/*end omp target data */
brisbane_mem_release(mem_fjacX);
brisbane_mem_release(mem_njacX);
brisbane_mem_release(mem_lhsX);
}
|
kohonen_som_topology.c | /**
* \file
* \brief [Kohonen self organizing
* map](https://en.wikipedia.org/wiki/Self-organizing_map) (topological map)
*
* This example implements a powerful unsupervised learning algorithm called as
* a self organizing map. The algorithm creates a connected network of weights
* that closely follows the given data points. This thus creates a topological
* map of the given data i.e., it maintains the relationship between various
* data points in a much higher dimensional space by creating an equivalent in a
* 2-dimensional space.
* <img alt="Trained topological maps for the test cases in the program"
* src="https://raw.githubusercontent.com/TheAlgorithms/C/docs/images/machine_learning/kohonen/2D_Kohonen_SOM.svg"
* />
* \author [Krishna Vedala](https://github.com/kvedala)
* \warning MSVC 2019 compiler generates code that does not execute as expected.
* However, MinGW, Clang for GCC and Clang for MSVC compilers on windows perform
* as expected. Any insights and suggestions should be directed to the author.
* \see kohonen_som_trace.c
*/
#define _USE_MATH_DEFINES /**< required for MS Visual C */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP // check if OpenMP based parallellization is available
#include <omp.h>
#endif
/**
* @addtogroup machine_learning Machine learning algorithms
* @{
* @addtogroup kohonen_2d Kohonen SOM topology algorithm
* @{
*/
#ifndef max
/** shorthand for maximum value */
#define max(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef min
/** shorthand for minimum value */
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif
/** Container describing a 3D array stored as one contiguous (linear) buffer.
 * Element (i,j,k) lives at data[i*dim2*dim3 + j*dim3 + k]; see
 * kohonen_data_3d() for the index computation. */
struct kohonen_array_3d
{
    int dim1;     /**< length of first dimension */
    int dim2;     /**< length of second dimension */
    int dim3;     /**< length of third dimension */
    double *data; /**< pointer to the linearly stored data (dim1*dim2*dim3 doubles) */
};
/** Function that returns the pointer to (x, y, z) ^th location in the
* linear 3D array given by:
* \f[
* X_{i,j,k} = i\times M\times N + j\times N + k
* \f]
* where \f$L\f$, \f$M\f$ and \f$N\f$ are the 3D matrix dimensions.
* \param[in] arr pointer to ::kohonen_array_3d structure
* \param[in] x first index
* \param[in] y second index
* \param[in] z third index
* \returns pointer to (x,y,z)^th location of data
*/
double *kohonen_data_3d(const struct kohonen_array_3d *arr, int x, int y, int z)
{
    /* Flatten (x, y, z) into a linear index using Horner's scheme:
       x strides over dim2*dim3 elements, y over dim3, z is innermost. */
    int idx = z + arr->dim3 * (y + arr->dim2 * x);
    return &arr->data[idx];
}
/**
* Helper function to generate a random number in a given interval.
* \n Steps:
* 1. `r1 = rand() % 100` gets a random number between 0 and 99
* 2. `r2 = r1 / 100` converts random number to be between 0 and 0.99
* 3. scale and offset the random number to given range of \f$[a,b)\f$
* \f[
* y = (b - a) \times \frac{\text{(random number between 0 and RAND_MAX)} \;
* \text{mod}\; 100}{100} + a \f]
*
* \param[in] a lower limit
* \param[in] b upper limit
* \returns random number in the range \f$[a,b)\f$
*/
double _random(double a, double b)
{
    /* Draw a pseudo-random integer percentage in 0..99, then scale and
       shift it into the half-open interval [a, b). */
    const int percent = rand() % 100;
    return ((b - a) * percent / 100.f) + a;
}
/**
 * Save a given n-dimensional data matrix to file.
 *
 * \param[in] fname filename to save in (gets overwritten without confirmation)
 * \param[in] X matrix to save
 * \param[in] num_points rows in the matrix = number of points
 * \param[in] num_features columns in the matrix = dimensions of points
 * \returns 0 if all ok
 * \returns -1 if file creation failed
 */
int save_2d_data(const char *fname, double **X, int num_points,
                 int num_features)
{
    FILE *fp = fopen(fname, "wt");
    if (!fp)  // error with fopen
    {
        char msg[120];
        // snprintf (not sprintf) so that an overlong file name cannot
        // overflow the fixed-size message buffer
        snprintf(msg, sizeof(msg), "File error (%s): ", fname);
        perror(msg);
        return -1;
    }

    for (int i = 0; i < num_points; i++)  // for each point in the array
    {
        for (int j = 0; j < num_features; j++)  // for each feature in the array
        {
            fprintf(fp, "%.4g", X[i][j]);  // print the feature value
            if (j < num_features - 1)      // if not the last feature
                fputc(',', fp);            // suffix comma
        }
        if (i < num_points - 1)  // if not the last row
            fputc('\n', fp);     // start a new line
    }

    fclose(fp);
    return 0;
}
/**
 * Create the distance matrix or
 * [U-matrix](https://en.wikipedia.org/wiki/U-matrix) from the trained weights
 * and save to disk.
 *
 * \param [in] fname filename to save in (gets overwritten without confirmation)
 * \param [in] W model matrix to save
 * \returns 0 if all ok
 * \returns -1 if file creation failed
 */
int save_u_matrix(const char *fname, struct kohonen_array_3d *W)
{
    FILE *fp = fopen(fname, "wt");
    if (!fp)  // error with fopen
    {
        char msg[120];
        // snprintf (not sprintf) so that an overlong file name cannot
        // overflow the fixed-size message buffer
        snprintf(msg, sizeof(msg), "File error (%s): ", fname);
        perror(msg);
        return -1;
    }

    int R = max(W->dim1 >> 3, 2); /* neighborhood range */

    for (int i = 0; i < W->dim1; i++)  // for each x
    {
        for (int j = 0; j < W->dim2; j++)  // for each y
        {
            double distance = 0.f;

            // clamp the neighborhood window to the grid boundaries
            int from_x = max(0, i - R);
            int to_x = min(W->dim1, i + R + 1);
            int from_y = max(0, j - R);
            int to_y = min(W->dim2, j + R + 1);
            int l;
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : distance)
#endif
            for (l = from_x; l < to_x; l++)  // scan neighborhood in x
            {
                for (int m = from_y; m < to_y; m++)  // scan neighborhood in y
                {
                    double d = 0.f;
                    // k is declared inside the loop so it is private to each
                    // OpenMP thread (a shared loop counter here was a race)
                    for (int k = 0; k < W->dim3; k++)  // for each feature
                    {
                        double *w1 = kohonen_data_3d(W, i, j, k);
                        double *w2 = kohonen_data_3d(W, l, m, k);
                        d += (w1[0] - w2[0]) * (w1[0] - w2[0]);
                    }
                    distance += sqrt(d);
                }
            }

            // NOTE(review): the window holds up to (2R+1)^2 neighbors, so
            // dividing by R*R is only an approximate mean -- confirm intent.
            distance /= R * R;              // mean distance from neighbors
            fprintf(fp, "%.4g", distance);  // print the mean separation
            if (j < W->dim2 - 1)            // if not the last column
                fputc(',', fp);             // suffix comma
        }
        if (i < W->dim1 - 1)  // if not the last row
            fputc('\n', fp);  // start a new line
    }

    fclose(fp);
    return 0;
}
/**
* Get minimum value and index of the value in a matrix
* \param[in] X matrix to search
* \param[in] N number of points in the vector
* \param[out] val minimum value found
* \param[out] x_idx x-index where minimum value was found
* \param[out] y_idx y-index where minimum value was found
*/
void get_min_2d(double **X, int N, double *val, int *x_idx, int *y_idx)
{
    /* Exhaustive scan of the N x N matrix, keeping the smallest value
       seen so far together with its (row, column) position. */
    *val = INFINITY;
    for (int r = 0; r < N; r++)
    {
        for (int c = 0; c < N; c++)
        {
            if (X[r][c] < *val)
            {
                *val = X[r][c];
                *x_idx = r;
                *y_idx = c;
            }
        }
    }
}
/**
* Update weights of the SOM using Kohonen algorithm
*
* \param[in] X data point
* \param[in,out] W weights matrix
* \param[in,out] D temporary vector to store distances
* \param[in] num_out number of output points
* \param[in] num_features number of features per input sample
* \param[in] alpha learning rate \f$0<\alpha\le1\f$
* \param[in] R neighborhood range
* \returns minimum distance of sample and trained weights
*/
double kohonen_update_weights(const double *X, struct kohonen_array_3d *W,
                              double **D, int num_out, int num_features,
                              double alpha, int R)
{
    int x, y, k;
    double d_min = 0.f;

#ifdef _OPENMP
// NOTE(review): '#pragma omp for' is a no-op unless this function is invoked
// from inside an enclosing 'omp parallel' region -- confirm intended usage.
#pragma omp for
#endif
    // step 1: for each 2D output point
    for (x = 0; x < num_out; x++)
    {
        for (y = 0; y < num_out; y++)
        {
            D[x][y] = 0.f;
            // compute Euclidian distance of each output
            // point from the current sample
            for (k = 0; k < num_features; k++)
            {
                double *w = kohonen_data_3d(W, x, y, k);
                D[x][y] += (w[0] - X[k]) * (w[0] - X[k]);
            }
            D[x][y] = sqrt(D[x][y]);
        }
    }

    // step 2: get closest node i.e., node with smallest Euclidian distance to
    // the current pattern (the "best matching unit")
    int d_min_x, d_min_y;
    get_min_2d(D, num_out, &d_min, &d_min_x, &d_min_y);

    // step 3a: get the neighborhood range, clamped to the grid boundaries
    int from_x = max(0, d_min_x - R);
    int to_x = min(num_out, d_min_x + R + 1);
    int from_y = max(0, d_min_y - R);
    int to_y = min(num_out, d_min_y + R + 1);

    // step 3b: update the weights of nodes in the
    // neighborhood of the best matching unit
#ifdef _OPENMP
#pragma omp for
#endif
    for (x = from_x; x < to_x; x++)
    {
        for (y = from_y; y < to_y; y++)
        {
            /* you can enable the following normalization if needed.
               personally, I found it detrimental to convergence */
            // const double s2pi = sqrt(2.f * M_PI);
            // double normalize = 1.f / (alpha * s2pi);

            /* apply scaling inversely proportional to distance from the
               current node: a Gaussian of the squared grid distance d2
               whose width shrinks along with the learning rate alpha */
            double d2 =
                (d_min_x - x) * (d_min_x - x) + (d_min_y - y) * (d_min_y - y);
            double scale_factor = exp(-d2 / (2.f * alpha * alpha));

            for (k = 0; k < num_features; k++)
            {
                double *w = kohonen_data_3d(W, x, y, k);
                // pull the weight towards the sample X, scaled by alpha and
                // by proximity to the best matching unit
                w[0] += alpha * scale_factor * (X[k] - w[0]);
            }
        }
    }
    return d_min;
}
/**
* Apply incremental algorithm with updating neighborhood and learning rates
* on all samples in the given datset.
*
* \param[in] X data set
* \param[in,out] W weights matrix
* \param[in] num_samples number of output points
* \param[in] num_features number of features per input sample
* \param[in] num_out number of output points
* \param[in] alpha_min terminal value of alpha
*/
void kohonen_som(double **X, struct kohonen_array_3d *W, int num_samples,
                 int num_features, int num_out, double alpha_min)
{
    int R = num_out >> 2, iter = 0;  // initial neighborhood range = num_out/4
    // D is a num_out x num_out scratch matrix of node-to-sample distances,
    // reused by every call to kohonen_update_weights below.
    // NOTE(review): malloc results are not checked for NULL -- confirm this
    // is acceptable for this demo program.
    double **D = (double **)malloc(num_out * sizeof(double *));
    for (int i = 0; i < num_out; i++)
        D[i] = (double *)malloc(num_out * sizeof(double));

    double dmin = 1.f;  // average minimum distance of all samples

    // Loop alpha from 1 to alpha_min, stopping early once the average
    // minimum distance drops below 1e-3
    for (double alpha = 1.f; alpha > alpha_min && dmin > 1e-3;
         alpha -= 0.001, iter++)
    {
        dmin = 0.f;
        // Loop for each sample pattern in the data set
        for (int sample = 0; sample < num_samples; sample++)
        {
            // update weights for the current input pattern sample
            dmin += kohonen_update_weights(X[sample], W, D, num_out,
                                           num_features, alpha, R);
        }

        // every 100th iteration, reduce the neighborhood range
        if (iter % 100 == 0 && R > 1)
            R--;

        dmin /= num_samples;
        printf("iter: %5d\t alpha: %.4g\t R: %d\td_min: %.4g\r", iter, alpha, R,
               dmin);
    }
    putchar('\n');

    for (int i = 0; i < num_out; i++) free(D[i]);
    free(D);
}
/**
* @}
* @}
*/
/** Creates a random set of points distributed in four clusters in
 * 2D space with centroids at the points
 * * \f$(0.5, 0.5)\f$
 * * \f$(0.5, -0.5)\f$
 * * \f$(-0.5, 0.5)\f$
 * * \f$(-0.5, -0.5)\f$
 *
 * \param[out] data matrix to store data in
 * \param[in] N number of points required
 */
void test_2d_classes(double *const *data, int N)
{
    const double R = 0.3;  // radius of each cluster
    int i;
    const int num_classes = 4;
    // one centre per class cluster
    const double centres[][2] = {
        {.5, .5},   // class 1
        {.5, -.5},  // class 2
        {-.5, .5},  // class 3
        {-.5, -.5}  // class 4
    };
#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0; i < N; i++)
    {
        // pick a random cluster for this sample ('cluster' rather than the
        // C++ keyword 'class', for portability)
        int cluster = rand() % num_classes;

        // scatter the point uniformly within +/- R of the cluster centre
        data[i][0] = _random(centres[cluster][0] - R, centres[cluster][0] + R);
        data[i][1] = _random(centres[cluster][1] - R, centres[cluster][1] + R);
    }
}
/** Test that creates a random set of points distributed in four clusters in
* 2D space and trains an SOM that finds the topological pattern.
* The following [CSV](https://en.wikipedia.org/wiki/Comma-separated_values)
* files are created to validate the execution:
* * `test1.csv`: random test samples points with a circular pattern
* * `w11.csv`: initial random U-matrix
* * `w12.csv`: trained SOM U-matrix
*/
void test1()
{
    int j, N = 300;
    int features = 2;
    int num_out = 30;  // grid size - num_out x num_out

    // 2D space, hence size = number of rows * 2
    double **X = (double **)malloc(N * sizeof(double *));

    // cluster nodes in 'x' * cluster nodes in 'y' * features (2)
    struct kohonen_array_3d W;
    W.dim1 = num_out;
    W.dim2 = num_out;
    W.dim3 = features;
    W.data = (double *)malloc(num_out * num_out * features *
                              sizeof(double));  // assign rows

    // single pass allocates the sample rows and seeds the weights
    for (int i = 0; i < max(num_out, N); i++)  // loop till max(N, num_out)
    {
        if (i < N)  // only add new arrays if i < N
            X[i] = (double *)malloc(features * sizeof(double));
        if (i < num_out)  // only add new arrays if i < num_out
        {
            for (int k = 0; k < num_out; k++)
            {
#ifdef _OPENMP
#pragma omp for
#endif
                // preallocate with random initial weights
                for (j = 0; j < features; j++)
                {
                    double *w = kohonen_data_3d(&W, i, k, j);
                    w[0] = _random(-5, 5);
                }
            }
        }
    }

    test_2d_classes(X, N);  // create test data in four 2D clusters
    save_2d_data("test1.csv", X, N, features);  // save test data points
    save_u_matrix("w11.csv", &W);               // save initial random weights
    kohonen_som(X, &W, N, features, num_out, 1e-4);  // train the SOM
    save_u_matrix("w12.csv", &W);  // save the resultant weights

    for (int i = 0; i < N; i++) free(X[i]);
    free(X);
    free(W.data);
}
/** Creates a random set of points distributed in four clusters in
 * 3D space with centroids at the points
 * * \f$(0.5, 0.5, 0.5)\f$
 * * \f$(0.5, -0.5, -0.5)\f$
 * * \f$(-0.5, 0.5, 0.5)\f$
 * * \f$(-0.5, -0.5, -0.5)\f$
 *
 * \param[out] data matrix to store data in
 * \param[in] N number of points required
 */
void test_3d_classes1(double *const *data, int N)
{
    const double R = 0.2;  // radius of each cluster
    int i;
    const int num_classes = 4;
    const double centres[][3] = {
        // centres of each class cluster
        {.5, .5, .5},    // centre of class 1
        {.5, -.5, -.5},  // centre of class 2
        {-.5, .5, .5},   // centre of class 3
        {-.5, -.5, -.5}  // centre of class 4
        // (fixed: was {-.5, -.5 - .5} -- a missing comma collapsed the
        // 2nd and 3rd coordinates into -1.0 and left z zero-initialized)
    };
#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0; i < N; i++)
    {
        // select a random cluster for the point ('cluster' avoids the
        // C++ keyword 'class', for portability)
        int cluster = rand() % num_classes;

        // create random coordinates (x,y,z) around the centre of the cluster
        data[i][0] = _random(centres[cluster][0] - R, centres[cluster][0] + R);
        data[i][1] = _random(centres[cluster][1] - R, centres[cluster][1] + R);
        data[i][2] = _random(centres[cluster][2] - R, centres[cluster][2] + R);
    }
}
/** Test that creates a random set of points distributed in 4 clusters in
* 3D space and trains an SOM that finds the topological pattern. The following
* [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) files are created
* to validate the execution:
* * `test2.csv`: random test samples points
* * `w21.csv`: initial random U-matrix
* * `w22.csv`: trained SOM U-matrix
*/
void test2()
{
    int j, N = 500;
    int features = 3;
    int num_out = 30;  // grid size - num_out x num_out

    // 3D space, hence size = number of rows * 3
    double **X = (double **)malloc(N * sizeof(double *));

    // cluster nodes in 'x' * cluster nodes in 'y' * features (3)
    struct kohonen_array_3d W;
    W.dim1 = num_out;
    W.dim2 = num_out;
    W.dim3 = features;
    W.data = (double *)malloc(num_out * num_out * features *
                              sizeof(double));  // assign rows

    // single pass allocates the sample rows and seeds the weights
    for (int i = 0; i < max(num_out, N); i++)  // loop till max(N, num_out)
    {
        if (i < N)  // only add new arrays if i < N
            X[i] = (double *)malloc(features * sizeof(double));
        if (i < num_out)  // only add new arrays if i < num_out
        {
            for (int k = 0; k < num_out; k++)
            {
#ifdef _OPENMP
#pragma omp for
#endif
                for (j = 0; j < features; j++)
                {  // preallocate with random initial weights
                    double *w = kohonen_data_3d(&W, i, k, j);
                    w[0] = _random(-5, 5);
                }
            }
        }
    }

    test_3d_classes1(X, N);  // create test data in four 3D clusters
    save_2d_data("test2.csv", X, N, features);  // save test data points
    save_u_matrix("w21.csv", &W);               // save initial random weights
    kohonen_som(X, &W, N, features, num_out, 1e-4);  // train the SOM
    save_u_matrix("w22.csv", &W);  // save the resultant weights

    for (int i = 0; i < N; i++) free(X[i]);
    free(X);
    free(W.data);
}
/** Creates a random set of points distributed in eight clusters in
 * 3D space with centroids at the corners of the cube
 * \f$(\pm 0.5, \pm 0.5, \pm 0.5)\f$, i.e. every sign combination of
 * * \f$(0.5, 0.5, 0.5)\f$ through
 * * \f$(-0.5, -0.5, -0.5)\f$
 *
 * \param[out] data matrix to store data in
 * \param[in] N number of points required
 */
void test_3d_classes2(double *const *data, int N)
{
    const double R = 0.2;  // radius of each cluster
    int i;
    const int num_classes = 8;
    // cluster centres: the eight corners of the cube (+/-0.5, +/-0.5, +/-0.5)
    const double centres[][3] = {
        {.5, .5, .5},    // class 1
        {.5, .5, -.5},   // class 2
        {.5, -.5, .5},   // class 3
        {.5, -.5, -.5},  // class 4
        {-.5, .5, .5},   // class 5
        {-.5, .5, -.5},  // class 6
        {-.5, -.5, .5},  // class 7
        {-.5, -.5, -.5}  // class 8
    };
#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0; i < N; i++)
    {
        // pick a random cluster for this sample ('cluster' rather than the
        // C++ keyword 'class', for portability)
        int cluster = rand() % num_classes;

        // scatter each coordinate uniformly within +/- R of the centre
        for (int j = 0; j < 3; j++)
            data[i][j] =
                _random(centres[cluster][j] - R, centres[cluster][j] + R);
    }
}
/** Test that creates a random set of points distributed in eight clusters in
* 3D space and trains an SOM that finds the topological pattern. The following
* [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) files are created
* to validate the execution:
* * `test3.csv`: random test samples points
* * `w31.csv`: initial random U-matrix
* * `w32.csv`: trained SOM U-matrix
*/
void test3()
{
    int j, N = 500;
    int features = 3;
    int num_out = 30;  // grid size - num_out x num_out
    double **X = (double **)malloc(N * sizeof(double *));

    // cluster nodes in 'x' * cluster nodes in 'y' * features (3)
    struct kohonen_array_3d W;
    W.dim1 = num_out;
    W.dim2 = num_out;
    W.dim3 = features;
    W.data = (double *)malloc(num_out * num_out * features *
                              sizeof(double));  // assign rows

    // single pass allocates the sample rows and seeds the weights
    for (int i = 0; i < max(num_out, N); i++)  // loop till max(N, num_out)
    {
        if (i < N)  // only add new arrays if i < N
            X[i] = (double *)malloc(features * sizeof(double));
        if (i < num_out)  // only add new arrays if i < num_out
        {
            for (int k = 0; k < num_out; k++)
            {
#ifdef _OPENMP
#pragma omp for
#endif
                // preallocate with random initial weights
                for (j = 0; j < features; j++)
                {
                    double *w = kohonen_data_3d(&W, i, k, j);
                    w[0] = _random(-5, 5);
                }
            }
        }
    }

    test_3d_classes2(X, N);  // create test data in eight 3D clusters
    save_2d_data("test3.csv", X, N, features);  // save test data points
    save_u_matrix("w31.csv", &W);  // save initial random weights
    // note: terminal alpha (0.01) is larger than in test1/test2 (1e-4)
    kohonen_som(X, &W, N, features, num_out, 0.01);  // train the SOM
    save_u_matrix("w32.csv", &W);  // save the resultant weights

    for (int i = 0; i < N; i++) free(X[i]);
    free(X);
    free(W.data);
}
/**
* Convert clock cycle difference to time in seconds
*
* \param[in] start_t start clock
* \param[in] end_t end clock
* \returns time difference in seconds
*/
double get_clock_diff(clock_t start_t, clock_t end_t)
{
    /* Convert elapsed processor ticks into seconds. */
    const double elapsed_ticks = (double)(end_t - start_t);
    return elapsed_ticks / (double)CLOCKS_PER_SEC;
}
/** Main function */
int main(int argc, char **argv)
{
#ifdef _OPENMP
    printf("Using OpenMP based parallelization\n");
#else
    printf("NOT using OpenMP based parallelization\n");
#endif

    /* Run the three demos, reporting the CPU time each one took. */
    clock_t t_begin = clock();
    test1();
    clock_t t_end = clock();
    printf("Test 1 completed in %.4g sec\n", get_clock_diff(t_begin, t_end));

    t_begin = clock();
    test2();
    t_end = clock();
    printf("Test 2 completed in %.4g sec\n", get_clock_diff(t_begin, t_end));

    t_begin = clock();
    test3();
    t_end = clock();
    printf("Test 3 completed in %.4g sec\n", get_clock_diff(t_begin, t_end));

    printf("(Note: Calculated times include: writing files to disk.)\n\n");
    return 0;
}
|
GB_emult_04.c | //------------------------------------------------------------------------------
// GB_emult_04: C<M>= A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C<M>= A.*B, M sparse/hyper, A and B bitmap/full. C has the same sparsity
// structure as M, and its pattern is a subset of M.
// ------------------------------------------
// C <M>= A .* B
// ------------------------------------------
// sparse sparse bitmap bitmap (method: 04)
// sparse sparse bitmap full (method: 04)
// sparse sparse full bitmap (method: 04)
// sparse sparse full full (method: 04)
// TODO: this function can also do eWiseAdd, just as easily.
// Just change the "&&" to "||" in the GB_emult_04_template.
// If A and B are both full, eadd and emult are identical.
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif
#define GB_FREE_WORK \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_WERK_POP (M_ek_slicing, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_phbix_free (C) ; \
}
//------------------------------------------------------------------------------
// GB_emult_04: C<M> = A.*B, where M is sparse/hypersparse and both A and B
// are bitmap or full.  C is built with the same sparsity structure as M:
// an entry C(i,j) exists iff M(i,j) is present and true, and both A(i,j)
// and B(i,j) are present.
//------------------------------------------------------------------------------

GrB_Info GB_emult_04        // C<M>=A.*B, M sparse/hyper, A and B bitmap/full
(
    GrB_Matrix C,           // output matrix, static header
    const GrB_Type ctype,   // type of output matrix C
    const bool C_is_csc,    // format of output matrix C
    const GrB_Matrix M,     // sparse/hyper, not NULL
    const bool Mask_struct, // if true, use only the structure of M
    bool *mask_applied,     // if true, the mask was applied
    const GrB_Matrix A,     // input A matrix (bitmap/full)
    const GrB_Matrix B,     // input B matrix (bitmap/full)
    const GrB_BinaryOp op,  // op to perform C = op (A,B)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && C->static_header) ;

    ASSERT_MATRIX_OK (M, "M for emult_04", GB0) ;
    ASSERT_MATRIX_OK (A, "A for emult_04", GB0) ;
    ASSERT_MATRIX_OK (B, "B for emult_04", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for emult_04", GB0) ;

    // M must be sparse or hypersparse with no pending work; it may be
    // jumbled (C inherits M's jumbled state below).
    ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (!GB_ZOMBIES (M)) ;

    ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;

    // C gets the same sparsity structure (sparse or hypersparse) as M
    int C_sparsity = GB_sparsity (M) ;

    GBURBLE ("emult_04:(%s<%s>=%s.*%s) ",
        GB_sparsity_char (C_sparsity),
        GB_sparsity_char_matrix (M),
        GB_sparsity_char_matrix (A),
        GB_sparsity_char_matrix (B)) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    // Work holds three arrays of size M_ntasks each (Wfirst, Wlast,
    // Cp_kfirst), allocated together below once M_ntasks is known.
    GB_WERK_DECLARE (Work, int64_t) ;
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mh = M->h ;
    const int64_t *restrict Mi = M->i ;
    // Mx is NULL for a structural mask: GB_mcast then treats every entry
    // of M as true.
    const GB_void *restrict Mx = (Mask_struct) ? NULL : (GB_void *) M->x ;
    const int64_t vlen = M->vlen ;
    const int64_t vdim = M->vdim ;
    const int64_t nvec = M->nvec ;
    // NOTE(review): mnz is not referenced below; presumably retained for
    // debugging or symmetry with related methods -- confirm.
    const int64_t mnz = GB_nnz (M) ;
    const size_t msize = M->type->size ;

    // bitmaps of A and B; NULL if A or B is full (GBB then returns 1)
    const int8_t *restrict Ab = A->b ;
    const int8_t *restrict Bb = B->b ;

    //--------------------------------------------------------------------------
    // check if C is iso and compute its iso value if it is
    //--------------------------------------------------------------------------

    const size_t csize = ctype->size ;
    GB_void cscalar [GB_VLA(csize)] ;
    bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;

    //--------------------------------------------------------------------------
    // allocate C->p and C->h
    //--------------------------------------------------------------------------

    GB_OK (GB_new (&C, true, // sparse or hyper (same as M), static header
        ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
        C_sparsity, M->hyper_switch, nvec, Context)) ;
    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // slice the mask matrix M
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int M_ntasks, M_nthreads ;
    // NOTE(review): GB_SLICE_MATRIX presumably defines kfirst_Mslice,
    // klast_Mslice, and pstart_Mslice (used below), partitioning the
    // entries of M evenly across M_ntasks tasks -- confirm in GB_ek_slice.h.
    GB_SLICE_MATRIX (M, 8, chunk) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_WERK_PUSH (Work, 3*M_ntasks, int64_t) ;
    if (Work == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    Wfirst = Work ;
    Wlast = Work + M_ntasks ;
    Cp_kfirst = Work + M_ntasks * 2 ;

    //--------------------------------------------------------------------------
    // count entries in C
    //--------------------------------------------------------------------------

    // This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).

    // TODO: if M is structural and A and B are both full, then C has exactly
    // the same pattern as M, the first phase can be skipped.

    // Each task counts, per vector k it touches, how many mask entries have
    // both A(i,j) and B(i,j) present.  A vector split across tasks cannot be
    // written to Cp [k] directly: the partial counts of the first and last
    // vectors of each task go to Wfirst/Wlast and are merged afterwards.
    int tid ;
    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        Wfirst [tid] = 0 ;
        Wlast  [tid] = 0 ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            // count the entries in C(:,j)
            int64_t j = GBH (Mh, k) ;
            int64_t pstart = j * vlen ;     // start of A(:,j) and B(:,j)
            int64_t pM, pM_end ;
            GB_get_pA (&pM, &pM_end, tid, k,
                kfirst, klast, pstart_Mslice, Mp, vlen) ;
            int64_t cjnz = 0 ;
            for ( ; pM < pM_end ; pM++)
            {
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                {
                    int64_t i = Mi [pM] ;
                    // C(i,j) exists only if both A(i,j) and B(i,j) do
                    // (set intersection; GBB is 1 for full matrices)
                    cjnz +=
                        (GBB (Ab, pstart + i)
                        &&  // TODO: for GB_add, use || instead
                        GBB (Bb, pstart + i)) ;
                }
            }
            if (k == kfirst)
            {
                // partial count: vector k may be shared with task tid-1
                Wfirst [tid] = cjnz ;
            }
            else if (k == klast)
            {
                // partial count: vector k may be shared with task tid+1
                Wlast [tid] = cjnz ;
            }
            else
            {
                // interior vector: owned by this task alone
                Cp [k] = cjnz ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // finalize Cp, cumulative sum of Cp and compute Cp_kfirst
    //--------------------------------------------------------------------------

    // merge the Wfirst/Wlast partial counts into Cp, then compute the
    // cumulative sum of Cp and each task's starting position Cp_kfirst
    GB_ek_slice_merge1 (Cp, Wfirst, Wlast, M_ek_slicing, M_ntasks) ;
    GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
        Wfirst, Wlast, M_ek_slicing, M_ntasks, M_nthreads, Context) ;

    //--------------------------------------------------------------------------
    // allocate C->i and C->x
    //--------------------------------------------------------------------------

    int64_t cnz = Cp [nvec] ;
    // set C->iso = C_iso   OK
    GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;

    //--------------------------------------------------------------------------
    // copy pattern into C
    --------------------------------------------------------------------------*/

    // TODO: could make these components of C shallow instead

    if (GB_IS_HYPERSPARSE (M))
    {
        // copy M->h into C->h
        GB_memcpy (C->h, Mh, nvec * sizeof (int64_t), M_nthreads) ;
    }

    C->nvec = nvec ;
    C->jumbled = M->jumbled ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // get the opcode
    //--------------------------------------------------------------------------

    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    bool op_is_first  = (opcode == GB_FIRST_opcode) ;
    bool op_is_second = (opcode == GB_SECOND_opcode) ;
    bool op_is_pair   = (opcode == GB_PAIR_opcode) ;
    GB_Type_code ccode = ctype->code ;

    //--------------------------------------------------------------------------
    // check if the values of A and/or B are ignored
    //--------------------------------------------------------------------------

    // With C = ewisemult (A,B), only the intersection of A and B is used.
    // If op is SECOND or PAIR, the values of A are never accessed.
    // If op is FIRST  or PAIR, the values of B are never accessed.
    // If op is PAIR, the values of A and B are never accessed.
    // Contrast with ewiseadd.

    // A is passed as x, and B as y, in z = op(x,y)
    bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
    bool B_is_pattern = op_is_first  || op_is_pair || op_is_positional ;

    //--------------------------------------------------------------------------
    // using a built-in binary operator (except for positional operators)
    --------------------------------------------------------------------------*/

    #define GB_PHASE_2_OF_2

    if (C_iso)
    {

        //----------------------------------------------------------------------
        // C is iso
        //----------------------------------------------------------------------

        // Cx [0] = cscalar = op (A,B); only the pattern is computed below
        GB_BURBLE_MATRIX (C, "(iso emult) ") ;
        memcpy (C->x, cscalar, csize) ;

        // pattern of C = set intersection of pattern of A and B
        #define GB_ISO_EMULT
        #include "GB_emult_04_template.c"

    }
    else
    {

        //----------------------------------------------------------------------
        // C is non-iso
        //----------------------------------------------------------------------

        bool done = false ;

        #ifndef GBCOMPACT

            //------------------------------------------------------------------
            // define the worker for the switch factory
            //------------------------------------------------------------------

            #define GB_AemultB_04(mult,xname) GB (_AemultB_04_ ## mult ## xname)

            #define GB_BINOP_WORKER(mult,xname)                             \
            {                                                               \
                info = GB_AemultB_04(mult,xname) (C, M, Mask_struct, A, B,  \
                    Cp_kfirst, M_ek_slicing, M_ntasks, M_nthreads) ;        \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            //------------------------------------------------------------------
            // launch the switch factory
            //------------------------------------------------------------------

            // use the hard-coded worker only when the op is a non-positional
            // built-in and C's type matches the op's output type
            GB_Type_code xcode, ycode, zcode ;
            if (!op_is_positional &&
                GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
                op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
            {
                #define GB_NO_PAIR
                #include "GB_binop_factory.c"
            }

        #endif

        //----------------------------------------------------------------------
        // generic worker
        //----------------------------------------------------------------------

        // fallback for user-defined, positional, or disabled operators
        if (!done)
        {
            GB_BURBLE_MATRIX (C, "(generic emult_04: %s) ", op->name) ;
            GB_ewise_generic (C, op, NULL, 0, 0,
                NULL, NULL, NULL, C_sparsity, GB_EMULT_METHOD4, Cp_kfirst,
                M_ek_slicing, M_ntasks, M_nthreads, NULL, 0, 0, NULL, 0, 0,
                M, Mask_struct, false, A, B, Context) ;
        }
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    ASSERT_MATRIX_OK (C, "C output for emult_04", GB0) ;
    // C<M> was computed directly, so the caller must not apply M again
    (*mask_applied) = true ;
    return (GrB_SUCCESS) ;
}
|
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
/// Checked C contextual keywords
/// These keywords are for bounds expressions. They are contextual to avoid
/// collisions with existing identifiers in programs. Some keywords like "count"
/// and "any" are likely to collide. Others are unlikely to collide, but we make
/// them contextual for consistency.
/// \brief Identifier for "bounds".
IdentifierInfo *Ident_bounds;
/// \brief Identifier for "byte_count".
IdentifierInfo *Ident_byte_count;
/// \brief Identifier for "count".
IdentifierInfo *Ident_count;
/// \brief Identifier for "unknown".
IdentifierInfo *Ident_unknown;
/// \brief Identifier for "itype"
IdentifierInfo *Ident_itype;
/// \brief Identifier for "rel_align"
IdentifierInfo *Ident_rel_align;
/// \brief Identifier for "rel_align_value"
IdentifierInfo *Ident_rel_align_value;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<PragmaHandler> CheckedScopeHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII helper for the template-parameter depth: records how many levels
/// were added through it and removes exactly that many on scope exit.
class TemplateParameterDepthRAII {
  unsigned &Depth;
  unsigned LevelsAdded;

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : Depth(Depth), LevelsAdded(0) {}

  ~TemplateParameterDepthRAII() { Depth -= LevelsAdded; }

  /// Enter one more template-parameter level.
  void operator++() {
    ++Depth;
    ++LevelsAdded;
  }

  /// Enter \p D additional levels at once.
  void addDepth(unsigned D) {
    Depth += D;
    LevelsAdded += D;
  }

  /// Replace this object's contribution so that exactly \p D levels are
  /// attributed to it (the depth is adjusted accordingly).
  void setAddedDepth(unsigned D) {
    Depth = Depth - LevelsAdded + D;
    LevelsAdded = D;
  }

  unsigned getDepth() const { return Depth; }
  unsigned getOriginalDepth() const { return Depth - LevelsAdded; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Free the accumulated TemplateIdAnnotations once it is safe: at EOF, or
/// when the preprocessor has no annotation tokens that could still point
/// at them.
void MaybeDestroyTemplateIds() {
  if (TemplateIds.empty())
    return;
  if (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())
    DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
  Parser &Self;

  DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
  // On scope exit, ask the parser to free template-id annotations that can
  // no longer be referenced by pending annotation tokens.
  ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,

    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token
    NoSpaceBeforeLess = 0x1,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };

  /// One candidate: the expression on the left of the '<', the location of
  /// the '<' itself, its ranking, and the bracket depths at which the
  /// candidate was recorded.
  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    unsigned short ParenCount, BracketCount, BraceCount;

    /// True when the parser sits at exactly the bracket depths where this
    /// candidate was recorded.
    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }

    /// True at the recording depth or anywhere nested more deeply within it.
    bool isActiveOrNested(Parser &P) const {
      if (isActive(P))
        return true;
      return P.ParenCount > ParenCount || P.BracketCount > BracketCount ||
             P.BraceCount > BraceCount;
    }
  };

  SmallVector<Loc, 8> Locs;

  /// Record an expression that might have been intended to be a template
  /// name.  When several candidates are active we arbitrarily keep the
  /// innermost one (in 'foo < bar < baz', 'bar' is the current candidate),
  /// replacing it only by a candidate of equal or higher priority.  No
  /// attempt is made to track that 'foo' is also a candidate for the case
  /// where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (Locs.empty() || !Locs.back().isActive(P)) {
      Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                      P.BracketCount, P.BraceCount});
      return;
    }
    Loc &Innermost = Locs.back();
    if (Innermost.Priority <= Prio) {
      Innermost.TemplateName = TemplateName;
      Innermost.LessLoc = LessLoc;
      Innermost.Priority = Prio;
    }
  }

  /// Mark the current potential missing-template location as handled (we
  /// passed a "corresponding" '>' or '>>' token, or left a bracket scope),
  /// retiring it together with anything nested inside it.
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }

  /// Get the current enclosing expression that might have been intended to
  /// be a template name, or null if no candidate is active.
  Loc *getCurrent(Parser &P) {
    if (Locs.empty() || !Locs.back().isActive(P))
      return nullptr;
    return &Locs.back();
  }
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
/// Values are bit flags; combine with '|' (see LLVM_MARK_AS_BITMASK_ENUM).
enum class ParsedStmtContext {
  /// This context permits declarations in language modes where declarations
  /// are not statements.
  AllowDeclarationsInC = 0x1,
  /// This context permits standalone OpenMP directives.
  AllowStandaloneOpenMPDirectives = 0x2,
  /// This context is at the top level of a GNU statement expression.
  InStmtExpr = 0x4,

  /// The context of a regular substatement (no flags set).
  SubStmt = 0,
  /// The context of a compound-statement.
  Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

  LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
  Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
  ~Parser() override;

  // Trivial accessors for the parser's collaborators and current state.
  const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
  const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
  Preprocessor &getPreprocessor() const { return PP; }
  Sema &getActions() const { return Actions; }
  AttributeFactory &getAttrFactory() { return AttrFactory; }

  const Token &getCurToken() const { return Tok; }
  Scope *getCurScope() const { return Actions.getCurScope(); }
  void incrementMSManglingNumber() const {
    return Actions.incrementMSManglingNumber();
  }

  Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

  // Type forwarding.  All of these are statically 'void*', but they may all be
  // different actual classes based on the actions in place.
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;

  typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

  typedef Sema::FullExprArg FullExprArg;

  // Parsing methods.

  /// Initialize - Warm up the parser.
  ///
  void Initialize();

  /// Parse the first top-level declaration in a translation unit.
  bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);

  /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
  /// the EOF was encountered.
  bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
  /// Convenience overload that discards the parsed declaration group.
  bool ParseTopLevelDecl() {
    DeclGroupPtrTy Result;
    return ParseTopLevelDecl(Result);
  }
  /// ConsumeToken - Consume the current 'peek token' and lex the next one.
  /// This does not work with special tokens: string literals, code completion,
  /// annotation tokens and balanced tokens must be handled using the specific
  /// consume methods.
  /// Returns the location of the consumed token.
  SourceLocation ConsumeToken() {
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current token only if it is of kind \p Expected.
  /// \returns true if a token was consumed, false otherwise (and the
  /// token stream is left untouched).
  bool TryConsumeToken(tok::TokenKind Expected) {
    if (Tok.isNot(Expected))
      return false;
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return true;
  }

  /// As above, but additionally reports the consumed token's location
  /// in \p Loc on success.
  bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
    if (!TryConsumeToken(Expected))
      return false;
    Loc = PrevTokLocation;
    return true;
  }
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
  /// Return the location just past the end of the previously consumed
  /// token (the preprocessor computes the token's extent).
  SourceLocation getEndOfPreviousToken() {
    return PP.getLocForEndOfToken(PrevTokLocation);
  }

  /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
  /// to the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
    return Actions.getNullabilityKeyword(nullability);
  }
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
  /// Returns true if the current token is '=' or is a type of '='.
  /// For typos, give a fixit to '='
  bool isTokenEqualOrEqualTypo();

  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    // After the EnterToken/Lex/EnterToken sequence the stream reads:
    // Consumed (now in Tok), followed by the old current token.
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind.  This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
///\ brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// \brief Handle the annotation token produced for
/// #pragma CHECKED_SCOPE [on-off-switch]
void HandlePragmaCheckedScope();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  // The parser always holds one token (Tok) that the preprocessor has
  // already handed over, so lookahead N maps to the preprocessor's N-1.
  if (N != 0 && !Tok.is(tok::eof))
    return PP.LookAhead(N - 1);
  // Lookahead 0 — or any lookahead once we hit eof — is the current token.
  return Tok;
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
  // Equivalent to GetLookAheadToken(1): Tok is already lexed, so the
  // preprocessor's lookahead 0 is the token following Tok.
  return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
  // A null annotation value is how setTypeAnnotation stores an invalid
  // type; map it back to TypeError().
  if (!Tok.getAnnotationValue())
    return TypeError();
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
/// Store a parsed type in an annotation token. Invalid types are stored as
/// a null value, which getTypeAnnotation turns back into TypeError().
static void setTypeAnnotation(Token &Tok, TypeResult T) {
  assert((T.isInvalid() || T.get()) &&
         "produced a valid-but-null type annotation?");
  Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
/// Read a resolved declaration out of an annotation token.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
  return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
/// Store a resolved declaration in an annotation token.
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
  Tok.setAnnotationValue(ND);
}
/// Read an IdentifierInfo out of an annotation token.
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
  return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  // Round-trips the opaque pointer stored by setExprAnnotation.
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  // The stored opaque pointer is read back by getExprAnnotation.
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
/// Heuristic check: could the current token start a C++ nested-name
/// specifier (identifier, '::', decltype, __super, or a template-id
/// followed by '::')?
bool MightBeCXXScopeToken() {
  if (Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype,
                  tok::kw___super))
    return true;
  // A template-id only begins a scope when '::' follows it.
  return Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon);
}
/// Annotate a C++ scope token, but only if the current token could
/// plausibly begin one; otherwise report no error.
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
  if (!MightBeCXXScopeToken())
    return false;
  return TryAnnotateCXXScopeToken(EnteringContext);
}
private:
/// Possible outcomes of TryAnnotateName (declared below).
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens
/// ("vector", "bool", and — under AltiVec only — "pixel"), replacing them
/// with the non-context-sensitive keywords. Returns true if the token was
/// replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  const LangOptions &LO = getLangOpts();
  if (!LO.AltiVec && !LO.ZVector)
    return false;
  IdentifierInfo *II = Tok.getIdentifierInfo();
  // "pixel" is only context-sensitive in AltiVec mode, not ZVector.
  bool IsCandidate = II == Ident_vector || II == Ident_bool ||
                     (LO.AltiVec && II == Ident_pixel);
  if (!IsCandidate)
    return false;
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
/// TryAltiVecVectorToken - Check for a context-sensitive AltiVec "vector"
/// identifier token, replacing it with the non-context-sensitive __vector.
/// Returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  // Annotation tokens carry no identifier info.
  if (Tok.isAnnotation())
    return false;
  // Lazily cache the IdentifierInfo for "instancetype" on first use.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  // Parser-side state captured at construction so Revert() can restore it.
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive; // True until Commit() or Revert() finishes the action.
public:
  explicit TentativeParsingAction(Parser& p) : P(p) {
    // Snapshot the parser state *before* telling the preprocessor to
    // start caching tokens for a possible backtrack.
    PrevPreferredType = P.PreferredType;
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  /// Keep the tokens consumed since construction; the tentative parse
  /// becomes the real parse.
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  /// Rewind the token stream and restore every piece of captured state.
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  // Unconditionally rewind on destruction; private inheritance keeps
  // Commit() out of reach.
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC; // The ObjC container we stepped out of; null if there was none.
  SaveAndRestore<bool> WithinObjCContainer;
public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    // Only exit a container context if we are actually inside one.
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    // Mirror the constructor: re-enter the container we left.
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
/// The syntactic context of the stray semicolon, used to pick the
/// diagnostic wording in ConsumeExtraSemi.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
/// A '(' '{' beginning a statement-expression.
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
/// A '[' '[' beginning a C++11 or C2x attribute.
AttrBegin,
/// A ']' ']' ending a C++11 or C2x attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self; // Null once the scope has been exited (or never entered).
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // Construct a new object to manage a scope in the parser Self where the
  // new Scope is created with the flags ScopeFlags, but only when we
  // aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (!EnteredScope || BeforeCompoundStmt) {
      // No scope is entered; remember that by clearing the member.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
    } else {
      Self->EnterScope(ScopeFlags);
    }
  }

  // Exit the scope associated with this object now, rather than waiting
  // until the object is destroyed.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }

  ~ParseScope() { Exit(); }
};
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
/// Introduces zero or more scopes for parsing; every scope entered through
/// Enter() is exited when the object is destroyed (or on explicit Exit()).
class MultiParseScope {
  Parser &Self;
  unsigned NumScopes = 0; // Number of scopes currently held open.
  MultiParseScope(const MultiParseScope&) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}

  /// Enter one more scope with the given flags.
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }

  /// Pop every scope this object still holds open.
  void Exit() {
    for (; NumScopes != 0; --NumScopes)
      Self.ExitScope();
  }

  ~MultiParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;   // The scope whose flags were modified.
  unsigned OldFlags; // Original flags — presumably restored by the
                     // destructor (defined out of line); confirm there.
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;
public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
  // Convenience overload: report the diagnostic at the current token.
  return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
/// Combine SkipUntilFlags values so callers can pass several at once,
/// e.g. StopAtSemi | StopBeforeMatch.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Single-token convenience form of the ArrayRef overload below.
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
/// Two-token convenience form: skip until either T1 or T2 is found.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  const tok::TokenKind StopToks[] = {T1, T2};
  return SkipUntil(StopToks, Flags);
}
/// Three-token convenience form: skip until T1, T2, or T3 is found.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  const tok::TokenKind StopToks[] = {T1, T2, T3};
  return SkipUntil(StopToks, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
// If the current scope is a Checked C _Forany or _Itypeforany scope, exit it.
// TODO: this probably doesn't belong in the parser.
void ExitQuantifiedTypeScope(DeclSpec &DS);
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // Each hook re-parses one category of cached tokens. The base-class
  // bodies are defined out of line — presumably no-ops that subclasses
  // selectively override; confirm in the implementation file.
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
  virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  // Overrides recurse into the nested class's own late-parsed members.
  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;
  void ParseLexedPragmas() override;

private:
  Parser *Self;        // The parser that owns the cached tokens.
  ParsingClass *Class; // The nested class whose members are deferred.
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;          // The attribute's argument tokens.
  IdentifierInfo &AttrName;   // Name of the attribute itself.
  IdentifierInfo *MacroII = nullptr; // Presumably the macro this attribute
                                     // was expanded from, if any — confirm.
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls; // Declarations the attribute appertains to.

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  /// Record an additional declaration this attribute applies to.
  void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
class LateParsedPragma : public LateParsedDeclaration {
  Parser *Self = nullptr;
  AccessSpecifier AS = AS_none; // Access specifier in effect at the pragma.
  CachedTokens Toks;            // The pragma's cached tokens.

public:
  explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
      : Self(P), AS(AS) {}

  /// Take ownership of the cached tokens (swaps, leaving Cached empty).
  void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
  const CachedTokens &toks() const { return Toks; }
  AccessSpecifier getAccessSpecifier() const { return AS; }

  void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  /// Whether these attributes are planned to be parsed shortly after
  /// creation. Const-qualified so it can be queried through const
  /// references (the original accessor was non-const for no reason).
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;           // The member function whose body is deferred.
  CachedTokens Toks; // The body's tokens, replayed by ParseLexedMethodDefs.

  explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

  void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser *Self;

  /// Method - The method declaration.
  Decl *Method;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed. Raw pointer; null when there is none.
  CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class currently being parsed. Must only be
/// called while ClassStack is non-empty.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped; // Guards against popping twice (explicit Pop + dtor).
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Pop automatically unless Pop() was already called explicitly.
    if (!Popped)
      P.PopParsingClass(State);
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Default: not parsing a template at all.
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  /// Template declaration or explicit specialization, carrying the parsed
  /// template parameter lists.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// Explicit instantiation, e.g. "extern template class X<int>;".
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  // Single-token form: forward with the same token for both stop kinds.
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

  /// Reset both the attribute list and the accumulated source range.
  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }

  SourceRange Range; // Source extent covering the parsed attributes.
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
  ParsedAttributesViewWithRange() : ParsedAttributesView() {}

  /// Reset the view and the accumulated source range (does not destroy
  /// the underlying attributes — this is only a view).
  void clearListOnly() {
    ParsedAttributesView::clearListOnly();
    Range = SourceRange();
  }

  SourceRange Range; // Source extent covering the viewed attributes.
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
/// Parse the body of an @interface/@protocol/@implementation up to the
/// matching context-closing keyword.
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
/// Parse an '@protocol' declaration (definition or forward declaration).
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII object tracking the state of the Objective-C @implementation that is
/// currently being parsed; holds method bodies that were lexed and stashed
/// for parsing after the @end (see LateParsedObjCMethods).
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this; // register as the active @implementation
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
/// The @implementation currently being parsed, or null if none.
ObjCImplParsingDataRAII *CurParsedObjCImpl;
/// Consume and store the tokens of a method or C-function body for
/// late parsing (see ObjCImplParsingDataRAII).
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
/// Parse one piece of a selector (an identifier or keyword), returning null
/// when the current token is not a selector piece.
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
/// Lazily-initialized identifiers for the context-sensitive qualifiers above.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
/// Parse an identifier within MS-style inline assembly.
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
/// Parse the operator and right-hand side of a binary expression, given an
/// already-parsed LHS and a minimum precedence to accept.
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
/// Diagnose a name that was assumed to be a template but is not.
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
/// Convenience overload: only consults the innermost tracked potential
/// angle bracket, if any.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
/// Parse the suffixes ('[...]', '(...)', '.', '->', '++', '--') that may
/// follow an already-parsed primary expression.
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
/// Return true if tokens A and B have no whitespace between them.
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
/// Parse an optional nested-name-specifier ('A::B::') into SS.
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
/// Tentatively parse an exception-specification ('throw(...)' or
/// 'noexcept(...)'); may stash tokens for delayed parsing when \p Delayed.
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
/// Dispatches to the brace-initializer parser when the current token is '{',
/// otherwise parses an assignment-expression.
ExprResult ParseInitializer() {
  return Tok.is(tok::l_brace) ? ParseBraceInitializer()
                              : ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
llvm::function_ref<void(const Designation &)> CodeCompleteCB);
//===--------------------------------------------------------------------===//
// Checked C Expressions
/// \brief Return true if this token can start a bounds expression.
bool StartsBoundsExpression(const Token &Tok);
/// \brief Return true if this token can start a bounds-safe interface
/// type annotation.
bool StartsInteropTypeAnnotation(const Token &tok);
bool StartsRelativeBoundsClause(Token &tok);
bool StartsWhereClause(const Token &tok);
bool ParseRelativeBoundsClauseForDecl(ExprResult &Expr);
RelativeBoundsClause *ParseRelativeBoundsClause(bool &isError,
IdentifierInfo *Ident,
SourceLocation BoundsKWLoc);
/// Skip tokens after a malformed bounds expression to recover parsing.
void SkipInvalidBoundsExpr(SourceLocation CurrentLoc);
ExprResult ParseBoundsCastExpression();
ExprResult ParseBoundsExpression();
ExprResult ParseGenericFunctionApplication(ExprResult TypeFunc, SourceLocation Loc);
using TypeArgVector = SmallVector<TypeArgument, 4>;
std::pair<bool, TypeArgVector> ParseGenericTypeArgumentList(SourceLocation Loc);
QualType SubstituteTypeVariable(QualType QT,
SmallVector<TypeArgument, 4> &typeNames);
ExprResult ParseInteropTypeAnnotation(const Declarator &D, bool IsReturn=false);
bool ParseBoundsAnnotations(const Declarator &D,
SourceLocation ColonLoc,
BoundsAnnotations &Result,
std::unique_ptr<CachedTokens> *DeferredToks = nullptr,
bool IsReturn=false,
Decl *ThisDecl = nullptr);
bool DeferredParseBoundsAnnotations(std::unique_ptr<CachedTokens> Toks,
BoundsAnnotations &Result,
const Declarator &D,
Decl *ThisDecl = nullptr);
bool ConsumeAndStoreBoundsExpression(CachedTokens &Toks);
bool ConsumeAndStoreWhereClause(CachedTokens &Toks);
// Delay parse a return bounds expression in Toks. Used to parse return
// bounds after the return type has been constructed. Stores the bounds
// expression in Result. Returns true if there was a parsing error.
static bool ParseBoundsCallback(void *P,
std::unique_ptr<CachedTokens> Toks,
ArrayRef<ParmVarDecl *> Params,
BoundsAnnotations &Result,
const Declarator &D);
ExprResult ParseReturnValueExpression();
DeclResult ParseRecordTypeApplication(RecordDecl *Base, bool IsItypeGeneric);
/// Parse a pack expression of the form '_Pack(expr, existential_type, substitution_type)'.
ExprResult ParsePackExpression();
/// Enters and exits WhereClause scope. Invokes ParseWhereClauseHelper to parse a where
/// clause.
WhereClause *ParseWhereClause();
/// Parse a Checked C where clause.
WhereClause *ParseWhereClauseHelper();
/// Parse a Checked C where clause fact.
WhereClauseFact *ParseWhereClauseFact();
/// Parse a where clause occurring on a declaration.
/// Returns false on error, true otherwise.
bool ParseWhereClauseOnDecl(Decl *D);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
/// Parse the body of a message send once the receiver is known
/// (super, a class type, or an expression).
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
/// Parse the body of a compound statement; the Checked C parameters carry
/// any checked-scope specifier written on the block.
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false,
CheckedScopeSpecifier WrittenCSS = CSS_None,
SourceLocation CSSLoc = SourceLocation(),
SourceLocation CSMLoc = SourceLocation(),
SourceLocation BNDLoc = SourceLocation());
/// Parse the parenthesized condition of an if/switch/while statement.
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
SourceLocation *LParenLoc = nullptr,
SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior this __if_exists or __if_not_exists block
/// should have: parse it, skip it, or treat it as dependent.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  // Only these contexts are restricted to a (trailing-)type-specifier.
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
/// Ordered from most to least restrictive; see
/// isDefiningTypeSpecifierContext for the mapping from contexts.
enum class AllowDefiningTypeSpec {
/// The grammar doesn't allow a defining-type-specifier here, and we must
/// not parse one (eg, because a '{' could mean something else).
No,
/// The grammar doesn't allow a defining-type-specifier here, but we permit
/// one for error recovery purposes. Sema will reject.
NoButErrorRecovery,
/// The grammar allows a defining-type-specifier here, even though it's
/// always invalid. Sema will reject.
YesButInvalid,
/// The grammar allows a defining-type-specifier here, and one can be valid.
Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
  // Cases are grouped from most restrictive to least restrictive.
  switch (DSC) {
  case DeclSpecContext::DSC_trailing:
    return AllowDefiningTypeSpec::No;

  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
    return AllowDefiningTypeSpec::NoButErrorRecovery;

  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
    return AllowDefiningTypeSpec::YesButInvalid;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
    return AllowDefiningTypeSpec::Yes;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
    return false;

  // Only full declaration contexts admit an opaque enum declaration.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;

  /// True when a ':' was seen, i.e. this really is a for-range-declaration.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
/// A ForRangeInit together with the already-built loop-variable statement.
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};
/// Parse a declaration (simple-declaration, static_assert, namespace, ...).
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
/// Parse the declarator group following an already-parsed decl-spec.
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
/// Parse a function body; the Checked C Kind carries the function's
/// checked-scope specifier.
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope,
CheckedScopeSpecifier Kind = CSS_None);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
/// Parse one struct-declaration, invoking FieldsCallback for each declared
/// field.
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's no a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  return getLangOpts().CPlusPlus
             ? isCXXDeclarationSpecifier() == TPResult::True
             : isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  return getLangOpts().CPlusPlus ? isCXXDeclarationStatement()
                                 : isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Notify Sema's OpenMP machinery before any disambiguation happens.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  return getLangOpts().CPlusPlus
             ? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
             : isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  // Outside C++ there is no ambiguity: just check for a specifier-qualifier.
  if (!getLangOpts().CPlusPlus) {
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  if (getLangOpts().CPlusPlus) {
    bool Ambiguous; // result discarded in this context
    return isCXXTypeId(TypeIdUnambiguous, Ambiguous);
  }
  return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
/// Possible outcomes of disambiguating what follows 'if (' / 'switch (';
/// returned by isCXXConditionDeclarationOrInitStatement.
enum class ConditionOrInitStatement {
  Expression, ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
  ForRangeDecl, ///< Disambiguated as a for-range declaration.
  Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Overload of isCXXTypeId that discards the ambiguity result.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool DiscardedAmbiguity;
  return isCXXTypeId(Context, DiscardedAmbiguity);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  // True/False: the ambiguity is resolved. Ambiguous: more tentative
  // parsing is needed. Error: a parsing error was encountered.
  True, False, Ambiguous, Error
};
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context = DeclaratorContext::TypeName,
AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' begins an attribute-specifier, and only when standard
  // attributes are enabled; anything else is not our business here.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// Diagnose attributes in \p Attrs when the current token starts a C++11
/// attribute-specifier ('[[' or 'alignas') in a misplaced position.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  const bool StartsAttribute =
      (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
      Tok.is(tok::kw_alignas);
  if (StartsAttribute)
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
/// Diagnose and drop attributes that are not allowed here.
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
/// View variant: diagnose disallowed attributes and clear only the list
/// (the underlying attributes are owned elsewhere).
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If the current token is '__attribute__', parse the GNU attribute list and
/// attach the result to declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes AttrList(AttrFactory);
  SourceLocation EndLoc;
  ParseGNUAttributes(AttrList, &EndLoc, LateAttrs, &D);
  D.takeAttributes(AttrList, EndLoc);
}
/// If the current token is '__attribute__', parse the GNU attribute list
/// directly into \p attrs.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If a C++11 attribute-specifier starts here, parse it and attach the
/// attributes to declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange AttrList(AttrFactory);
  SourceLocation EndLoc;
  ParseCXX11Attributes(AttrList, &EndLoc);
  D.takeAttributes(AttrList, EndLoc);
}
/// If a C++11 attribute-specifier starts here, parse it into \p attrs.
/// \returns true when attributes were parsed.
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return false;
  // Parse into a ranged buffer, then hand the attributes over to the
  // caller-provided (range-less) list.
  ParsedAttributesWithRange Buffer(AttrFactory);
  ParseCXX11Attributes(Buffer, endLoc);
  attrs.takeAllFrom(Buffer);
  return true;
}
/// If a C++11 attribute-specifier starts here, parse it into \p attrs.
/// \p OuterMightBeMessageSend is forwarded to the disambiguation check.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed())
    return;
  if (isCXX11AttributeSpecifier(/*Disambiguate=*/false,
                                OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// If Microsoft extensions are enabled and a '[' is next, parse
/// Microsoft-style (square-bracket) attributes into \p attrs.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt)
    return;
  if (Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// If the '__declspec' keyword is enabled and is next, parse the
/// declspec attribute list into \p Attrs.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute when compiling OpenCL.
/// \return false if an error happens; true otherwise (including the
/// non-OpenCL case, where there is nothing to parse).
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (!getLangOpts().OpenCL)
    return true;
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
void ParseCheckedPointerSpecifiers(DeclSpec & DS);
void ParseExistentialTypeSpecifier(DeclSpec &DS);
void ParseUnpackSpecifier(DeclSpec &DS);
void ParseExistentialTypeSpecifierHelper(DeclSpec &DS);
void ParseForanySpecifier(DeclSpec &DS);
bool ParseForanySpecifierHelper(DeclSpec &DS, Scope::ScopeFlags S);
void ParseItypeforanySpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload: classify the current token (delegates to the
/// Token-taking overload declared above).
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;  // Scope specifier naming the scope to (re-)enter.
  bool EnteredScope; // Sema scope entered (ActOnCXXEnterDeclaratorScope ok).
  bool CreatedScope; // A parser Scope was pushed and must be popped.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
  // Enter the declarator scope named by SS. May be called at most once.
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");
    // Record the scope creation before Sema gets a chance to fail below,
    // so the destructor still pops the parser scope on failure.
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.
    // ActOnCXXEnterDeclaratorScope returning false means success.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }
  ~DeclaratorScopeObj() {
    // Undo in reverse order: leave the Sema scope first (if entered),
    // then pop the parser scope (if pushed).
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
/// Bitmask describing which attribute syntaxes ParseTypeQualifierListOpt
/// should accept (parse) versus diagnose.
enum AttrRequirements {
  AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
  AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
  AR_GNUAttributesParsed = 1 << 1, ///< GNU attributes are parsed (accepted).
  AR_CXX11AttributesParsed = 1 << 2, ///< C++11 [[...]] attributes are parsed.
  AR_DeclspecAttributesParsed = 1 << 3, ///< __declspec attributes are parsed.
  AR_AllAttributesParsed = AR_GNUAttributesParsed |
                           AR_CXX11AttributesParsed |
                           AR_DeclspecAttributesParsed,
  AR_VendorAttributesParsed = AR_GNUAttributesParsed |
                              AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
/// Returned by isCXX11AttributeSpecifier (declared below).
enum CXX11AttributeKind {
  /// This is not an attribute specifier.
  CAK_NotAttributeSpecifier,
  /// This should be treated as an attribute-specifier.
  CAK_AttributeSpecifier,
  /// The next tokens are '[[', but this is not an attribute-specifier. This
  /// is ill-formed by C++11 [dcl.attr.grammar]p6.
  CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// Locations and name for one level of a nested namespace declaration,
/// collected while parsing (consumed by ParseInnerNamespace below).
struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc; // 'namespace' keyword location.
  SourceLocation InlineLoc;    // 'inline' keyword location, if present.
  SourceLocation IdentLoc;     // Location of the namespace name.
  IdentifierInfo *Ident;       // The namespace name itself.
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// Pieces of a parsed using-declarator: an optional 'typename', a
/// nested-name-specifier, the declared name, and an optional ellipsis.
struct UsingDeclarator {
  SourceLocation TypenameLoc;
  CXXScopeSpec SS;
  UnqualifiedId Name;
  SourceLocation EllipsisLoc;
  /// Reset every field so this declarator can be reused for the next
  /// comma-separated using-declarator.
  void clear() {
    SS.clear();
    Name.clear();
    TypenameLoc = SourceLocation();
    EllipsisLoc = SourceLocation();
  }
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
OMPTraitInfo *ParentTI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse 'omp [begin] assume[s]' directive.
void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check the \p FoundKind against the \p ExpectedKind, if not issue an error
/// that the "end" matching the "begin" directive of kind \p BeginKind was not
/// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
/// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  // Depend-modifier or tail expression of the clause, when present
  // — TODO(review): confirm which clauses populate this.
  Expr *DepModOrTailExpr = nullptr;
  SourceLocation ColonLoc; // Location of ':' in the clause, if any.
  SourceLocation RLoc;     // Location of the clause's right paren.
  // Scope/name of a reduction identifier or mapper, for clauses that take one.
  CXXScopeSpec ReductionOrMapperIdScopeSpec;
  DeclarationNameInfo ReductionOrMapperId;
  int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
                          ///< lastprivate clause.
  // Modifiers on a 'map' clause and their locations (parallel vectors).
  SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
      MapTypeModifiers;
  SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
      MapTypeModifiersLoc;
  // Modifiers on 'to'/'from' motion clauses and their locations.
  SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
      MotionModifiers;
  SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
  bool IsMapTypeImplicit = false; // True when no explicit map-type was written.
  SourceLocation ExtraModifierLoc; // Location matching ExtraModifier.
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
/// Accumulates the GNU `asm` statement qualifiers (`volatile`, `inline`,
/// `goto`) seen while parsing a GCC-style asm statement, as a bitmask.
class GNUAsmQualifiers {
  /// Bitmask of AQ flags; starts empty.
  unsigned Qualifiers = AQ_unspecified;

public:
  enum AQ {
    AQ_unspecified = 0,
    AQ_volatile = 1,
    AQ_inline = 2,
    AQ_goto = 4,
  };
  /// Returns the source spelling of a single qualifier (defined out of line).
  static const char *getQualifierName(AQ Qualifier);
  /// Records one qualifier in the bitmask (defined out of line).
  bool setAsmQualifier(AQ Qualifier);
  // The stray semicolons that followed the two bodies below (empty
  // declarations, flagged by -Wextra-semi) were removed for consistency
  // with isGoto().
  inline bool isVolatile() const { return Qualifiers & AQ_volatile; }
  inline bool isInline() const { return Qualifiers & AQ_inline; }
  inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
logit_loss.h | /**
* Copyright (c) 2015 by Contributors
*/
#ifndef DIFACTO_LOSS_LOGIT_LOSS_H_
#define DIFACTO_LOSS_LOGIT_LOSS_H_
#include <vector>
#include <cmath>
#include "difacto/base.h"
#include "difacto/loss.h"
#include "dmlc/data.h"
#include "dmlc/omp.h"
#include "common/spmv.h"
namespace difacto {
/**
* \brief the logistic loss
*
* :math:`\ell(x,y,w) = log(1 + exp(- y <w, x>))`
*
*/
class LogitLoss : public Loss {
public:
LogitLoss() {}
virtual ~LogitLoss() {}
// No configurable options: all kwargs are returned unconsumed.
KWArgs Init(const KWArgs& kwargs) override {
return kwargs;
}
/**
* \brief perform prediction
*
* pred += X * w
*
* @param data the data X
* @param param input parameters
* - param[0], real_t vector, the weights
* - param[1], optional int vector, the weight positions
* - param[2], unused here, but the size check below requires it to be
*   present -- presumably part of the Loss call contract, TODO confirm
* @param pred predict output, should be pre-allocated
*/
void Predict(const dmlc::RowBlock<unsigned>& data,
const std::vector<SArray<char>>& param,
SArray<real_t>* pred) override {
CHECK_EQ(param.size(), 3);
Predict(data,
SArray<real_t>(param[0]),
SArray<int>(param[1]),
pred);
}
// Typed overload: pred += X * w, restricted to positions w_pos if nonempty.
void Predict(const dmlc::RowBlock<unsigned>& data,
const SArray<real_t>& weights,
const SArray<int>& w_pos,
SArray<real_t>* pred) {
// local handle to the weights for the SpMV call
SArray<real_t> w = weights;
SpMV::Times(data, w, pred, nthreads_, w_pos, {});
}
/*!
* \brief compute the gradients
*
* p = - y ./ (1 + exp (y .* pred));
* grad += X' * p;
*
* @param data the data X
* @param param input parameters
* - param[0], real_t vector, passed as the weights argument below
*   (currently unused by the typed overload)
* - param[1], optional int vector, the gradient positions
* - param[2], unused here
* - param[3], real_t vector, the predict output
* @param grad the results, should be pre-allocated
*/
void CalcGrad(const dmlc::RowBlock<unsigned>& data,
const std::vector<SArray<char>>& param,
SArray<real_t>* grad) override {
CHECK_EQ(param.size(), 4);
CalcGrad(data,
SArray<real_t>(param[0]),
SArray<int>(param[1]),
SArray<real_t>(param[3]),
grad);
}
// NOTE(review): `weights` is accepted but never read in this overload --
// confirm whether it is kept only for symmetry with Predict.
void CalcGrad(const dmlc::RowBlock<unsigned>& data,
const SArray<real_t>& weights,
const SArray<int>& w_pos,
const SArray<real_t>& pred,
SArray<real_t>* grad) {
// copy pred into p so the in-place transform below cannot touch the input
SArray<real_t> p; p.CopyFrom(pred);
CHECK_EQ(p.size(), data.size);
// p[i] = -y_i / (1 + exp(y_i * pred_i)), scaled by the example weight
CHECK_NOTNULL(data.label);
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < p.size(); ++i) {
// labels are mapped to {-1, +1}: any positive label counts as +1
real_t y = data.label[i] > 0 ? 1 : -1;
if (data.weight) {
p[i] = - y / (1 + std::exp(y * p[i])) * data.weight[i];
} else {
p[i] = - y / (1 + std::exp(y * p[i]));
}
}
// grad += X' * p, restricted to positions w_pos if nonempty
SpMV::TransTimes(data, p, grad, nthreads_, {}, w_pos);
}
};
} // namespace difacto
#endif // DIFACTO_LOSS_LOGIT_LOSS_H_
|
GB_binop__isle_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isle_int8
// A.*B function (eWiseMult): GB_AemultB__isle_int8
// A*D function (colscale): GB_AxD__isle_int8
// D*A function (rowscale): GB_DxB__isle_int8
// C+=B function (dense accum): GB_Cdense_accumB__isle_int8
// C+=b function (dense accum): GB_Cdense_accumb__isle_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_int8
// C=scalar+B GB_bind1st__isle_int8
// C=scalar+B' GB_bind1st_tran__isle_int8
// C=A+scalar GB_bind2nd__isle_int8
// C=A'+scalar GB_bind2nd_tran__isle_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT8 || GxB_NO_ISLE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; cij = (aij <= bij), no accumulator.
GrB_Info GB_Cdense_ewise3_noaccum__isle_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out: tell the caller to use the generic kernel instead
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C with the ISLE_INT8 operator
// (see the file header: "C+=B function (dense accum)").
GrB_Info GB_Cdense_accumB__isle_int8
(
GrB_Matrix C,
const GrB_Matrix B,
// ek-slice arrays: partition B's entries/vectors across the ntasks tasks
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out: caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b into the dense matrix C with the
// ISLE_INT8 operator (see the file header: "C+=b function (dense accum)").
// Returns GrB_NO_VALUE when the operator is compiled out, so the caller
// falls back to the generic kernel.
GrB_Info GB_Cdense_accumb__isle_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// single exit point; the duplicate return that previously appeared inside
// the braces (making this one unreachable) has been removed, matching the
// structure of the sibling GB_Cdense_accumB function.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// ISLE_INT8 operator entrywise.
GrB_Info GB_AxD__isle_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
// ek-slice arrays: partition A's entries/vectors across the ntasks tasks
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C->x; the meta template performs the traversal
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// ISLE_INT8 operator entrywise.
GrB_Info GB_DxB__isle_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C->x; the meta template performs the traversal
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B over the set union of the patterns of A
// and B; where both entries exist, cij = (aij <= bij).
GrB_Info GB_AaddB__isle_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspace; allocated inside the template, freed by GB_FREE_ALL
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the set intersection of the
// patterns of A and B; cij = (aij <= bij).
GrB_Info GB_AemultB__isle_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspace; allocated inside the template, freed by GB_FREE_ALL
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x to the first operand, so that
// Cx [k] = (x <= Bx [k]) for every entry present in B.
GrB_Info GB_bind1st__isle_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t x = (*((int8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions not present in the bitmap Bb
        if (GBB (Bb, k))
        {
            int8_t bk = Bx [k] ;
            Cx [k] = (x <= bk) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y to the second operand, so that
// Cx [k] = (Ax [k] <= y) for every entry present in A.
GrB_Info GB_bind2nd__isle_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions not present in the bitmap Ab
        if (GBB (Ab, k))
        {
            int8_t ak = Ax [k] ;
            Cx [k] = (ak <= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A while applying z = (x <= aij), using the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB_bind1st_tran__isle_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A while applying z = (aij <= y), using the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB_bind2nd_tran__isle_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// y is the scalar bound to the second operand
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
aggregation_move_generator.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_AGGREGATION_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_AGGREGATION_MOVE_GENERATOR_H__
#include "abstract_move_generator.h"
namespace printemps {
namespace neighborhood {
/*****************************************************************************/
/**
 * Move generator for "aggregation" constraints: each effective binomial
 * constraint a1*x1 + a2*x2 + c (= 0, per the solved-for formulas below --
 * TODO confirm the constraint sense against BinomialConstraint) yields four
 * candidate moves: change one variable by +/-1 and set the other to the
 * rounded value that keeps the constraint satisfied.
 */
template <class T_Variable, class T_Expression>
class AggregationMoveGenerator
: public AbstractMoveGenerator<T_Variable, T_Expression> {
private:
public:
/*************************************************************************/
AggregationMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
virtual ~AggregationMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
void setup(const std::vector<model_component::Constraint<
T_Variable, T_Expression> *> &a_RAW_CONSTRAINT_PTRS) {
/**
* Exclude constraints which contain fixed variables or selection
* variables.
*/
auto constraint_ptrs =
extract_effective_constraint_ptrs(a_RAW_CONSTRAINT_PTRS);
/**
* Convert constraint objects to BinomialConstraint objects.
*/
auto binomials = convert_to_binomial_constraints(constraint_ptrs);
/**
* Setup move objects.
*/
const int BINOMIALS_SIZE = binomials.size();
// four moves per binomial: first variable +/-1, second variable +/-1
this->m_moves.resize(4 * BINOMIALS_SIZE);
this->m_flags.resize(4 * BINOMIALS_SIZE);
for (auto i = 0; i < BINOMIALS_SIZE; i++) {
auto &move = this->m_moves[4 * i];
move.sense = MoveSense::Aggregation;
// alteration targets are fixed here; the target values (second of
// each pair) are recomputed by the updater below on every call
move.alterations.emplace_back(binomials[i].variable_ptr_first, 0);
move.alterations.emplace_back(binomials[i].variable_ptr_second, 0);
move.is_univariable_move = false;
move.is_selection_move = false;
utility::update_union_set(
&(move.related_constraint_ptrs),
binomials[i].variable_ptr_first->related_constraint_ptrs());
utility::update_union_set(
&(move.related_constraint_ptrs),
binomials[i].variable_ptr_second->related_constraint_ptrs());
move.is_special_neighborhood_move = true;
move.is_available = true;
move.overlap_rate = 0.0;
// the three sibling moves share everything but their target values
this->m_moves[4 * i + 1] = move;
this->m_moves[4 * i + 2] = move;
this->m_moves[4 * i + 3] = move;
}
/**
* Setup move updater.
*/
auto move_updater = //
[this, binomials, BINOMIALS_SIZE](
auto * a_moves_ptr, //
auto * a_flags, //
const bool a_ACCEPT_ALL, //
const bool a_ACCEPT_OBJECTIVE_IMPROVABLE, //
const bool a_ACCEPT_FEASIBILITY_IMPROVABLE, //
[[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
for (auto i = 0; i < BINOMIALS_SIZE; i++) {
{
// move 4i: x1 -> x1+1, x2 solved from the constraint;
// floor(expr + 0.5) rounds the solved value to nearest
auto index = 4 * i;
auto &alterations = (*a_moves_ptr)[index].alterations;
alterations[0].second =
binomials[i].variable_ptr_first->value() + 1;
alterations[1].second =
static_cast<T_Variable>(std::floor(
(-binomials[i].constant_value -
binomials[i].sensitivity_first *
(binomials[i].variable_ptr_first->value() +
1)) /
binomials[i].sensitivity_second +
0.5));
}
{
// move 4i+1: x1 -> x1-1, x2 solved
auto index = 4 * i + 1;
auto &alterations = (*a_moves_ptr)[index].alterations;
alterations[0].second =
binomials[i].variable_ptr_first->value() - 1;
alterations[1].second =
static_cast<T_Variable>(std::floor(
(-binomials[i].constant_value -
binomials[i].sensitivity_first *
(binomials[i].variable_ptr_first->value() -
1)) /
binomials[i].sensitivity_second +
0.5));
}
{
// move 4i+2: x2 -> x2+1, x1 solved
auto index = 4 * i + 2;
auto &alterations = (*a_moves_ptr)[index].alterations;
alterations[0]
.second = static_cast<T_Variable>(std::floor(
(-binomials[i].constant_value -
binomials[i].sensitivity_second *
(binomials[i].variable_ptr_second->value() +
1)) /
binomials[i].sensitivity_first +
0.5));
alterations[1].second =
binomials[i].variable_ptr_second->value() + 1;
}
{
// move 4i+3: x2 -> x2-1, x1 solved
auto index = 4 * i + 3;
auto &alterations = (*a_moves_ptr)[index].alterations;
alterations[0]
.second = static_cast<T_Variable>(std::floor(
(-binomials[i].constant_value -
binomials[i].sensitivity_second *
(binomials[i].variable_ptr_second->value() -
1)) /
binomials[i].sensitivity_first +
0.5));
alterations[1].second =
binomials[i].variable_ptr_second->value() - 1;
}
}
const int MOVES_SIZE = a_moves_ptr->size();
// a_flags[i] == 1 keeps move i; 0 filters it out
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
for (auto i = 0; i < MOVES_SIZE; i++) {
(*a_flags)[i] = 1;
if (!(*a_moves_ptr)[i].is_available) {
(*a_flags)[i] = 0;
continue;
}
if (neighborhood::has_fixed_variable((*a_moves_ptr)[i])) {
(*a_flags)[i] = 0;
continue;
}
if (neighborhood::has_bound_violation((*a_moves_ptr)[i])) {
(*a_flags)[i] = 0;
continue;
}
if (a_ACCEPT_ALL) {
/** nothing to do */
} else {
if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
neighborhood::has_objective_improvable_variable(
(*a_moves_ptr)[i])) {
continue;
}
if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
neighborhood::has_feasibility_improvable_variable(
(*a_moves_ptr)[i])) {
continue;
}
(*a_flags)[i] = 0;
}
}
};
this->m_move_updater = move_updater;
}
};
} // namespace neighborhood
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
GB_subassign_17.c | //------------------------------------------------------------------------------
// GB_subassign_17: C(I,J)<!M,repl> = scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 17: C(I,J)<!M,repl> = scalar ; using S
// M: present
// Mask_comp: true
// C_replace: true
// accum: NULL
// A: scalar
// S: constructed
// C: not bitmap
// M: not bitmap
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_17
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct,
const void *scalar,
const GrB_Type atype,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
// S records which entries of C(I,J) already exist; both phases consult it
// (via GB_C_S_LOOKUP / GB_NEXT (S)) to tell updates apart from insertions
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_GET_C ; // C must not be bitmap
const int64_t Cnvec = C->nvec ;
const int64_t *restrict Ch = C->h ;
const int64_t *restrict Cp = C->p ;
const bool C_is_hyper = (Ch != NULL) ;
GB_GET_MASK ;
GB_GET_SCALAR ;
GB_GET_S ;
// no accumulator: entries are overwritten, and unprotected ones deleted
GrB_BinaryOp accum = NULL ;
//--------------------------------------------------------------------------
// Method 17: C(I,J)<!M,repl> = scalar ; using S
//--------------------------------------------------------------------------
// Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
// required. The sparsity of !M cannot be exploited.
// Methods 13, 15, 17, and 19 are very similar.
//--------------------------------------------------------------------------
// Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
//--------------------------------------------------------------------------
GB_SUBASSIGN_IXJ_SLICE ;
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
//------------------------------------------------------------------
// get S(iA_start:end,j) and M(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;
//------------------------------------------------------------------
// C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
//--------------------------------------------------------------
// Get the indices at the top of each list.
//--------------------------------------------------------------
int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;
//--------------------------------------------------------------
// find the smallest index of [iS iA iM] (always iA)
//--------------------------------------------------------------
int64_t i = iA ;
//--------------------------------------------------------------
// get M(i,j)
//--------------------------------------------------------------
bool mij ;
if (i == iM)
{
// mij = (bool) M [pM]
mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
GB_NEXT (M) ;
}
else
{
// mij not present, implicitly false
ASSERT (i < iM) ;
mij = false ;
}
// complement the mask entry mij since Mask_comp is true
mij = !mij ;
//--------------------------------------------------------------
// assign the entry
//--------------------------------------------------------------
if (i == iS)
{
ASSERT (i == iA) ;
{
// both S (i,j) and A (i,j) present
GB_C_S_LOOKUP ;
if (mij)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =A ): copy A, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_noaccum_C_A_1_scalar ;
}
else
{
// ----[C A 0] or [X A 0]---------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): zombie
GB_DELETE_ENTRY ;
}
GB_NEXT (S) ;
}
}
else
{
ASSERT (i == iA) ;
{
// S (i,j) is not present, A (i,j) is present
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
}
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
// phase 2 re-walks the same IxJ space and inserts the pending tuples
// whose count was determined in phase 1
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
//------------------------------------------------------------------
// get S(iA_start:end,j) and M(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;
//------------------------------------------------------------------
// C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
//--------------------------------------------------------------
// Get the indices at the top of each list.
//--------------------------------------------------------------
int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;
//--------------------------------------------------------------
// find the smallest index of [iS iA iM] (always iA)
//--------------------------------------------------------------
int64_t i = iA ;
//--------------------------------------------------------------
// get M(i,j)
//--------------------------------------------------------------
bool mij ;
if (i == iM)
{
// mij = (bool) M [pM]
mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
GB_NEXT (M) ;
}
else
{
// mij not present, implicitly false
ASSERT (i < iM) ;
mij = false ;
}
// complement the mask entry mij since Mask_comp is true
mij = !mij ;
//--------------------------------------------------------------
// assign the entry
//--------------------------------------------------------------
if (i == iS)
{
ASSERT (i == iA) ;
{
GB_NEXT (S) ;
}
}
else
{
ASSERT (i == iA) ;
{
// S (i,j) is not present, A (i,j) is present
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
}
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
task_set.c | #include "communicator.h"
#include "task_set.h"
#ifdef __cplusplus
extern "C" {
#endif
#if TCI_USE_OPENMP_THREADS || TCI_USE_PTHREADS_THREADS || TCI_USE_WINDOWS_THREADS
/* Initialize a task set shared by every thread in comm.  The master thread
 * allocates ntask+1 slots and initializes slots 1..ntask to 0 (unclaimed);
 * slot 0 is left untouched -- presumably reserved, TODO confirm against
 * tci_slot usage.  The slot pointer is then broadcast so all threads share
 * the same array. */
void tci_task_set_init(tci_task_set* set, tci_comm* comm, unsigned ntask,
uint64_t work)
{
set->comm = comm;
set->ntask = ntask;
if (tci_comm_is_master(comm))
{
set->slots = (tci_slot*)malloc((ntask+1)*sizeof(tci_slot));
for (unsigned task = 0;task < ntask;task++)
tci_slot_init(set->slots+task+1, 0);
}
/* share the master's slot array with the other threads */
tci_comm_bcast(comm, (void**)&set->slots, 0);
unsigned nt = comm->nthread;
unsigned nt_outer, nt_inner;
/* split the nt threads into nt_outer gangs, balancing the amount of work
 * per task against the ntask available tasks */
tci_partition_2x2(nt, work, (work == 0 ? 1 : nt),
ntask, ntask, &nt_inner, &nt_outer);
tci_comm_gang(comm, &set->subcomm, TCI_EVENLY, nt_outer, 0);
}
/* Tear down a task set: wait for every thread to stop using the slot
 * array, release the gang sub-communicator, and let the master thread
 * (the allocator) free the slots. */
void tci_task_set_destroy(tci_task_set* set)
{
    tci_comm* comm = set->comm;
    /* All threads must pass this barrier before the slots can go away. */
    tci_comm_barrier(comm);
    tci_comm_destroy(&set->subcomm);
    /* Only the master allocated the slot array, so only it frees it. */
    if (tci_comm_is_master(comm))
    {
        free((void*)set->slots);
    }
}
/* Try to claim and execute `task`.  Returns 0 on success, EINVAL for an
 * out-of-range task id, EALREADY if some thread already claimed it.
 *
 * Fix: valid ids are 0..ntask-1.  The original test used `>`, so
 * task == ntask slipped through and indexed slots[ntask+1], one past the
 * end of the (ntask+1)-element slot array. */
int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    /* Atomically claim the slot; gid+1 distinguishes claimants from the
       empty value 0. */
    if (!tci_slot_try_fill(set->slots+task+1, 0, set->subcomm.gid+1))
        return EALREADY;
    func(&set->subcomm, task, payload);
    return 0;
}
/* Visit every task in order, stopping at (and returning) the first
 * non-zero status from tci_task_set_visit; returns 0 if all succeed. */
int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    unsigned task;
    int status = 0;
    for (task = 0; task < set->ntask && status == 0; task++)
    {
        status = tci_task_set_visit(set, func, task, payload);
    }
    return status;
}
#elif TCI_USE_TBB_THREADS
/* TBB backend: the task set owns a tbb::task_group whose pointer is
 * smuggled through the (otherwise unused) tci_comm* field via a cast. */
void tci_task_set_init(tci_task_set* set, tci_comm* comm, unsigned ntask, uint64_t work)
{
    (void)comm;
    (void)work;
    /* NOTE: set->comm holds a tbb::task_group*, not a real communicator. */
    set->comm = (tci_comm*)new tbb::task_group();
    set->ntask = ntask;
    set->slots = new tci_slot[ntask];
    for (unsigned task = 0;task < ntask;task++)
        tci_slot_init(set->slots+task, 0);
}
/* Wait for all spawned tasks to finish, then release the slot array and
 * the task_group that was stashed in the comm field. */
void tci_task_set_destroy(tci_task_set* set)
{
    ((tbb::task_group*)set->comm)->wait();
    delete[] set->slots;
    delete (tbb::task_group*)set->comm;
}
/* Claim `task` and schedule it on the TBB task_group.
 *
 * Fixes: (1) valid ids are 0..ntask-1 — the original `>` check admitted
 * task == ntask, indexing one past the end of slots; (2) the lambda now
 * captures by value only — the `[&,...]` default captured nothing else but
 * invites dangling references in an asynchronously executed closure. */
int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    if (!tci_slot_try_fill(set->slots+task, 0, 1)) return EALREADY;
    ((tbb::task_group*)set->comm)->run(
        [func,task,payload]
        {
            func(tci_single, task, payload);
        });
    return 0;
}
/* Visit every task in order; the first non-zero status short-circuits and
 * is returned, otherwise 0. */
int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    unsigned task;
    int status = 0;
    for (task = 0; task < set->ntask && status == 0; task++)
    {
        status = tci_task_set_visit(set, func, task, payload);
    }
    return status;
}
#elif TCI_USE_OMPTASK_THREADS
/* OpenMP-task backend: only the slot array and the task count are needed;
 * the communicator and work hint are unused. */
void tci_task_set_init(tci_task_set* set, tci_comm* comm, unsigned ntask, uint64_t work)
{
    (void)comm;
    (void)work;
    set->ntask = ntask;
    set->slots = (tci_slot*)malloc(sizeof(tci_slot)*ntask);
    for (unsigned task = 0;task < ntask;task++)
        tci_slot_init(set->slots+task, 0);
}
void tci_task_set_destroy(tci_task_set* set)
{
    /* Wait for all outstanding OpenMP tasks before freeing their slots. */
    #pragma omp taskwait
    free((void*)set->slots);
}
/* Claim `task` and spawn it as an OpenMP task (func/task/payload are
 * firstprivate by default, so they are safely copied into the task).
 *
 * Fix: valid ids are 0..ntask-1; the original `>` check admitted
 * task == ntask, indexing one past the end of the slot array. */
int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    if (!tci_slot_try_fill(set->slots+task, 0, 1)) return EALREADY;
    #pragma omp task
    {
        func(tci_single, task, payload);
    }
    return 0;
}
/* Visit all tasks, returning the first non-zero status or 0 on success. */
int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    unsigned task;
    int status = 0;
    for (task = 0; task < set->ntask && status == 0; task++)
    {
        status = tci_task_set_visit(set, func, task, payload);
    }
    return status;
}
#elif TCI_USE_DISPATCH_THREADS
/* GCD backend: a dispatch group is stored in the comm field and the global
 * concurrent queue in the subcomm field, both via type-punning casts.
 * NOTE(review): this assumes the GCD handles fit in those fields — confirm
 * against the struct layout in task_set.h. */
void tci_task_set_init(tci_task_set* set, tci_comm* comm, unsigned ntask, uint64_t work)
{
    (void)comm;
    (void)work;
    *(dispatch_group_t*)&set->comm = dispatch_group_create();
    *(dispatch_queue_t*)&set->subcomm =
        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    set->ntask = ntask;
    set->slots = (tci_slot*)malloc(sizeof(tci_slot)*ntask);
    for (unsigned task = 0;task < ntask;task++)
        tci_slot_init(set->slots+task, 0);
}
/* Wait for every async task in the group to complete, release the group,
 * and free the slot array. */
void tci_task_set_destroy(tci_task_set* set)
{
    dispatch_group_t group = *(dispatch_group_t*)&set->comm;
    dispatch_group_wait(group, DISPATCH_TIME_FOREVER);
    dispatch_release(group);
    free((void*)set->slots);
}
/* Argument pack marshalled to a GCD worker. */
typedef struct tci_task_func_data
{
    tci_task_func func;
    unsigned task;
    void* payload;
} tci_task_func_data;

/* Trampoline run by GCD: unpack the heap-allocated argument pack, invoke
 * the user function, then release the pack (allocated in visit below). */
static void tci_task_launcher(void* data_)
{
    tci_task_func_data* data = (tci_task_func_data*)data_;
    data->func(tci_single, data->task, data->payload);
    free(data);
}

/* Claim `task` and submit it asynchronously to the dispatch group/queue.
 *
 * Fixes: (1) valid ids are 0..ntask-1; the original `>` check admitted
 * task == ntask, one past the end of the slot array.  (2) the original
 * passed the address of a stack-local tci_task_func_data to
 * dispatch_group_async_f; the block runs after this frame has returned, so
 * the pointer dangled.  The pack is now heap-allocated and freed by the
 * launcher. */
int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    if (!tci_slot_try_fill(set->slots+task, 0, 1)) return EALREADY;
    tci_task_func_data* data =
        (tci_task_func_data*)malloc(sizeof(tci_task_func_data));
    if (!data) return ENOMEM;
    data->func = func;
    data->task = task;
    data->payload = payload;
    dispatch_group_t group = *(dispatch_group_t*)&set->comm;
    dispatch_queue_t queue = *(dispatch_queue_t*)&set->subcomm;
    dispatch_group_async_f(group, queue, data, tci_task_launcher);
    return 0;
}
/* Visit all tasks, returning the first non-zero status or 0 on success. */
int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    unsigned task;
    int status = 0;
    for (task = 0; task < set->ntask && status == 0; task++)
    {
        status = tci_task_set_visit(set, func, task, payload);
    }
    return status;
}
#elif TCI_USE_PPL_THREADS
/* PPL backend: mirrors the TBB backend — a concurrency::task_group is
 * stashed in the (otherwise unused) tci_comm* field via a cast. */
void tci_task_set_init(tci_task_set* set, tci_comm* comm, unsigned ntask, uint64_t work)
{
    (void)comm;
    (void)work;
    /* NOTE: set->comm holds a concurrency::task_group*, not a comm. */
    set->comm = (tci_comm*)new concurrency::task_group();
    set->ntask = ntask;
    set->slots = new tci_slot[ntask];
    for (unsigned task = 0;task < ntask;task++)
        tci_slot_init(set->slots+task, 0);
}
/* Wait for all scheduled tasks, then release slots and the task_group. */
void tci_task_set_destroy(tci_task_set* set)
{
    ((concurrency::task_group*)set->comm)->wait();
    delete[] set->slots;
    delete (concurrency::task_group*)set->comm;
}
/* Claim `task` and schedule it on the PPL task_group.
 *
 * Fixes: (1) valid ids are 0..ntask-1 — the original `>` check admitted
 * task == ntask, indexing one past the end of slots; (2) the lambda now
 * captures by value only — the unused `&` default invites dangling
 * references in an asynchronously executed closure. */
int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    if (!tci_slot_try_fill(set->slots+task, 0, 1)) return EALREADY;
    ((concurrency::task_group*)set->comm)->run(
        [func,task,payload]
        {
            func(tci_single, task, payload);
        });
    return 0;
}
/* Visit all tasks, returning the first non-zero status or 0 on success. */
int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    unsigned task;
    int status = 0;
    for (task = 0; task < set->ntask && status == 0; task++)
    {
        status = tci_task_set_visit(set, func, task, payload);
    }
    return status;
}
#else // single threaded
/* Single-threaded backend: no slots or sub-communicator are needed, only
 * the task count. */
void tci_task_set_init(tci_task_set* set, tci_comm* comm, unsigned ntask,
                       uint64_t work)
{
    (void)comm;
    (void)work;
    set->ntask = ntask;
}
/* Single-threaded backend allocates nothing, so nothing to release. */
void tci_task_set_destroy(tci_task_set* set)
{
    (void)set;
}
/* Execute `task` synchronously (no claiming needed with one thread).
 *
 * Fix: valid ids are 0..ntask-1; the original `>` check accepted
 * task == ntask and invoked func with an out-of-range task id. */
int tci_task_set_visit(tci_task_set* set, tci_task_func func, unsigned task,
                       void* payload)
{
    if (task >= set->ntask) return EINVAL;
    func(tci_single, task, payload);
    return 0;
}
/* Run every task in order, synchronously; always succeeds. */
int tci_task_set_visit_all(tci_task_set* set, tci_task_func func,
                           void* payload)
{
    unsigned task = 0;
    while (task < set->ntask)
    {
        func(tci_single, task, payload);
        task++;
    }
    return 0;
}
#endif
#ifdef __cplusplus
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 *
 * NOTE: *y is normalized in place (the classic GNU libc manual idiom), so
 * the caller's y is modified.  Returns 1 if the difference is negative,
 * otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec  += carry;
    }
    /* Push excess microseconds in the difference back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int excess = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * excess;
        y->tv_sec  -= excess;
    }
    /* tv_usec of the result is now certainly non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* 1 iff the true difference is negative. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 16;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_unaryop__abs_fp64_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_fp32
// op(A') function: GB_tran__abs_fp64_fp32
// C type: double
// A type: float
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = fabs ((double) Ax [p]) for all p, in parallel.  Cx and Ax may
// be aliased (the operation is elementwise and in-place safe).
GrB_Info GB_unop__abs_fp64_fp32
(
    double *Cx,             // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // expanded form of GB_CAST_OP (p, p):
        float aij = Ax [p] ;            // aij = Ax [p]
        double z = (double) aij ;       // typecast float -> double
        Cx [p] = fabs (z) ;             // apply the ABS operator
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast float -> double, and apply
// fabs, with the work sliced into naslice pieces.  The actual loop lives
// in the shared template GB_unaryop_transpose.c, driven by the GB_* macros
// defined at the top of this (generated) file.
GrB_Info GB_tran__abs_fp64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the transpose template: fill C using the row counts
    // computed in phase 1
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
kmeans.c | /** @file kmeans.c
** @brief K-means - Declaration
** @author Andrea Vedaldi, David Novotny
**/
/*
Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson.
Copyright (C) 2013 Andrea Vedaldi and David Novotny.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans K-means clustering
@author Andrea Vedaldi
@author David Novotny
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref kmeans.h implements a number of algorithm for **K-means
quantization**: Lloyd @cite{lloyd82least}, an accelerated version by
Elkan @cite{elkan03using}, and a large scale algorithm based on
Approximate Nearest Neighbors (ANN). All algorithms support @c float
or @c double data and can use the $l^1$ or the $l^2$ distance for
clustering. Furthermore, all algorithms can take advantage of multiple
CPU cores.
Please see @subpage kmeans-fundamentals for a technical description of
K-means and of the algorithms implemented here.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The goal of K-means is to partition a dataset into $K$
“compact” clusters. The following example demonstrates
using @ref kmeans.h in the C programming language to partition @c
numData @c float vectors into compute @c numCenters clusters using
Lloyd's algorithm:
@code
#include <vl/kmeans.h>
double energy ;
double * centers ;
// Use float data and the L2 distance for clustering
VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
// Use Lloyd algorithm
vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ;
// Initialize the cluster centers by randomly sampling the data
vl_kmeans_init_centers_with_rand_data (kmeans, data, dimension, numData, numCenters) ;
// Run at most 100 iterations of cluster refinement using Lloyd algorithm
vl_kmeans_set_max_num_iterations (kmeans, 100) ;
vl_kmeans_refine_centers (kmeans, data, numData) ;
// Obtain the energy of the solution
energy = vl_kmeans_get_energy(kmeans) ;
// Obtain the cluster centers
centers = vl_kmeans_get_centers(kmeans) ;
@endcode
Once the centers have been obtained, new data points can be assigned
to clusters by using the ::vl_kmeans_quantize function:
@code
vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData) ;
float * distances = vl_malloc(sizeof(float) * numData) ;
vl_kmeans_quantize(kmeans, assignments, distances, data, numData) ;
@endcode
Alternatively, one can directly assign new data points to the closest
centers, without bothering with a ::VlKMeans object.
There are several considerations that may impact the performance of
KMeans. First, since K-means is usually based local optimization
algorithm, the **initialization method** is important. The following
initialization methods are supported:
Method | Function | Description
---------------|-----------------------------------------|-----------------------------------------------
Random samples | ::vl_kmeans_init_centers_with_rand_data | Random data points
K-means++ | ::vl_kmeans_init_centers_plus_plus | Random selection biased towards diversity
Custom | ::vl_kmeans_set_centers | Choose centers (useful to run quantization only)
See @ref kmeans-init for further details. The initialization methods
use a randomized selection of the data points; the random number
generator init is controlled by ::vl_rand_init.
The second important choice is the **optimization algorithm**. The
following optimization algorithms are supported:
Algorithm | Symbol | See | Description
------------|------------------|-------------------|-----------------------------------------------
Lloyd | ::VlKMeansLloyd | @ref kmeans-lloyd | Alternate EM-style optimization
Elkan | ::VlKMeansElkan | @ref kmeans-elkan | A speedup using triangular inequalities
ANN | ::VlKMeansANN | @ref kmeans-ann | A speedup using approximated nearest neighbors
See the relative sections for further details. These algorithm are
iterative, and stop when either a **maximum number of iterations**
(::vl_kmeans_set_max_num_iterations) is reached, or when the energy
changes sufficiently slowly in one iteration (::vl_kmeans_set_min_energy_variation).
All the three algorithms support multithreaded computations. The number
of threads used is usually controlled globally by ::vl_set_num_threads.
**/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans-fundamentals K-means fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
Given $n$ points $\bx_1,\dots,\bx_n \in \real^d$, the goal of K-means
is find $K$ `centers` $\bc_1,\dots,\bc_m \in \real^d$ and
`assignments` $q_1,\dots,q_n \in \{1,\dots,K\}$ of the points to the
centers such that the sum of distances
\[
E(\bc_1,\dots,\bc_k,q_1,\dots,q_n)
= \sum_{i=1}^n \|\bx_i - \bc_{q_i} \|_p^p
\]
is minimized. $K$-means is obtained for the case $p=2$ ($l^2$ norm),
because in this case the optimal centers are the means of the input
vectors assigned to them. Here the generalization $p=1$ ($l^1$ norm)
will also be considered.
Up to normalization, the K-means objective $E$ is also the average
reconstruction error if the original points are approximated with the
cluster centers. Thus K-means is used not only to group the input
points into cluster, but also to `quantize` their values.
K-means is widely used in computer vision, for example in the
construction of vocabularies of visual features (visual words). In
these applications the number $n$ of points to cluster and/or the
number $K$ of clusters is often large. Unfortunately, minimizing the
objective $E$ is in general a difficult combinatorial problem, so
locally optimal or approximated solutions are sought instead.
The basic K-means algorithm alternate between re-estimating the
centers and the assignments (@ref kmeans-lloyd). Combined with a good
initialization strategy (@ref kmeans-init) and, potentially, by
re-running the optimization from a number of randomized starting
states, this algorithm may attain satisfactory solutions in practice.
However, despite its simplicity, Lloyd's algorithm is often too slow.
A good replacement is Elkan's algorithm (@ref kmeans-elkan), which
uses the triangular inequality to cut down significantly the cost of
Lloyd's algorithm. Since this algorithm is otherwise equivalent, it
should often be preferred.
For very large problems (millions of point to clusters and hundreds,
thousands, or more clusters to find), even Elkan's algorithm is not
sufficiently fast. In these cases, one can resort to a variant of
Lloyd's algorithm that uses an approximated nearest neighbors routine
(@ref kmeans-ann).
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-init Initialization methods
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
All the $K$-means algorithms considered here find locally optimal
solutions; as such the way they are initialized is important. @ref
kmeans.h supports the following initialization algorithms:
@par Random data samples
The simplest initialization method is to sample $K$ points at random
from the input data and use them as initial values for the cluster
centers.
@par K-means++
@cite{arthur07k-means} proposes a randomized initialization of the
centers which improves upon random selection. The first center $\bc_1$
is selected at random from the data points $\bx_1, \dots, \bx_n $ and
the distance from this center to all points $\|\bx_i - \bc_1\|_p^p$ is
computed. Then the second center $\bc_2$ is selected at random from
the data points with probability proportional to the distance. The
procedure is repeated to obtain the other centers by using the minimum
distance to the centers collected so far.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-lloyd Lloyd's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The most common K-means method is Lloyd's algorithm
@cite{lloyd82least}. This algorithm is based on the observation that,
while jointly optimizing clusters and assignment is difficult,
optimizing one given the other is easy. Lloyd's algorithm alternates
the steps:
1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_j}$ closer to it. This requires finding for each point the
closest among $K$ other points, which is potentially slow.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distances to the points assigned to it. It is easy to
show that the best center is the mean or median of the points,
respectively if the $l^2$ or $l^1$ norm is considered.
A naive implementation of the assignment step requires $O(dnK)$
operations, where $d$ is the dimensionality of the data, $n$ the
number of data points, and $K$ the number of centers. Updating the
centers is much cheaper: $O(dn)$ operations suffice to compute the $K$
means and a slightly higher cost is required for the medians. Clearly,
the bottleneck is the assignment computation, and this is what the
other K-means algorithm try to improve.
During the iterations, it can happen that a cluster becomes empty. In
this case, K-means automatically **“restarts” the
cluster** center by selecting a training point at random.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-elkan Elkan's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
Elkan's algorithm @cite{elkan03using} is a variation of Lloyd
alternate optimization algorithm (@ref kmeans-lloyd) that uses the
triangular inequality to avoid many distance calculations when
assigning points to clusters. While much faster than Lloyd, Elkan's
method uses storage proportional to the number of clusters by data
points, which makes it unpractical for a very large number of
clusters.
The idea of this algorithm is that, if a center update does not move
them much, then most of the point-to-center computations can be
avoided when the point-to-center assignments are recomputed. To detect
which distances need evaluation, the triangular inequality is used to
lower and upper bound distances after a center update.
Elkan algorithms uses two key observations. First, one has
\[
\|\bx_i - \bc_{q_i}\|_p \leq \|\bc - \bc_{q_i}\|_p / 2
\quad\Rightarrow\quad
\|\bx_i - \bc_{q_i}\|_p \leq \|\bx_i - \bc\|_p.
\]
Thus if the distance between $\bx_i$ and its current center
$\bc_{q_i}$ is less than half the distance of the center $\bc_{q_i}$
to another center $\bc$, then $\bc$ can be skipped when the new
assignment for $\bx_i$ is searched. Checking this requires keeping
track of all the inter-center distances, but centers are typically a
small fraction of the training data, so overall this can be a
significant saving. In particular, if this condition is satisfied for
all the centers $\bc \not= \bc_{q_i}$, the point $\bx_i$ can be
skipped completely. Furthermore, the condition can be tested also
based on an upper bound $UB_i$ of $\|\bx_i - \bc_{q_i}\|_p$.
Second, if a center $\bc$ is updated to $\hat{\bc}$, then the new
distance from $\bx$ to $\hat{\bc}$ is bounded from below and above by
\[
\|\bx - \bc\|_p - \|\bc - \hat{\bc}\|_p
\leq
\|\bx - \hat{\bc}\|_p
\leq
\|\bx - \bc\|_p + \|\bc - \hat{\bc}\|_p.
\]
This allows to maintain an upper bound on the distance of $\bx_i$ to
its current center $\bc_{q_i}$ and a lower bound to any other center
$\bc$:
@f{align*}
UB_i & \leftarrow UB_i + \|\bc_{q_i} - \hat{\bc}_{q_i} \|_p \\
LB_i(\bc) & \leftarrow LB_i(\bc) - \|\bc -\hat \bc\|_p.
@f}
Thus the K-means algorithm becomes:
1. **Initialization.** Compute $LB_i(\bc) = \|\bx_i -\hat \bc\|_p$ for
all points and centers. Find the current assignments $q_i$ and
bounds $UB_i$ by finding the closest centers to each point: $UB_i =
\min_{\bc} LB_i(\bc)$.
2. **Center estimation.**
1. Recompute all the centers based on the new means; call the updated
version $\hat{\bc}$.
2. Update all the bounds based on the distance $\|\bc - \hat\bc\|_p$
as explained above.
3. Set $\bc \leftarrow \hat\bc$ for all the centers and go to the next
iteration.
3. **Quantization.**
1. Skip any point $\bx_i$ such that $UB_i \leq \frac{1}{2} \|\bc_{q_i} - \bc\|_p$
for all centers $\bc \not= \bc_{q_i}$.
2. For each remaining point $\bx_i$ and center $\bc \not= \bc_{q_i}$:
1. Skip $\bc$ if
\[
UB_i \leq \frac{1}{2} \| \bc_{q_i} - \bc \|
\quad\text{or}\quad
UB_i \leq LB_i(\bc).
\]
The first condition reflects the first observation above; the
second uses the bounds to decide if $\bc$ can be closer than the
current center $\bc_{q_i}$ to the point $\bx_i$. If the center
cannot be skipped, continue as follows.
3. Skip $\bc$ if the condition above is satisfied after making the
upper bound tight:
\[
UB_i = LB_i(\bc_{q_i}) = \| \bx_i - \bc_{q_i} \|_p.
\]
Note that the latter calculation can be done only once for $\bx_i$.
If the center cannot be skipped still, continue as follows.
4. Tighten the lower bound too:
\[
LB_i(\bc) = \| \bx_i - \bc \|_p.
\]
At this point both $UB_i$ and $LB_i(\bc)$ are tight. If $LB_i <
UB_i$, then the point $\bx_i$ should be reassigned to
$\bc$. Update $q_i$ to the index of center $\bc$ and reset $UB_i
= LB_i(\bc)$.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-ann ANN algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The *Approximate Nearest Neighbor* (ANN) K-means algorithm
@cite{beis97shape} @cite{silpa-anan08optimised} @cite{muja09fast} is a
variant of Lloyd's algorithm (@ref kmeans-lloyd) uses a best-bin-first
randomized KD-tree algorithm to approximately (and quickly) find the
closest cluster center to each point. The KD-tree implementation is
based on @ref kdtree.
The algorithm can be summarized as follows:
1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_j}$ closer to it. This starts by indexing the $K$ centers
by a KD-tree and then using the latter to quickly find the closest
center for every training point. The search is approximated to
further improve speed. This opens up the possibility that a data
point may receive an assignment that is *worse* than the current
one. This is avoided by checking that the new assignment estimated
by using ANN is an improvement; otherwise the old assignment is
kept.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distances to the points assigned to it. It is easy to
show that the best center is the mean or median of the points,
respectively if the $l^2$ or $l^1$ norm is considered.
The key is to trade-off carefully the speedup obtained by using the
ANN algorithm and the loss in accuracy when retrieving neighbors. Due
to the curse of dimensionality, KD-trees become less effective for
higher dimensional data, so that the search cost, which in the best
case is logarithmic with this data structure, may become effectively
linear. This is somehow mitigated by the fact that new a new KD-tree
is computed at each iteration, reducing the likelihood that points may
get stuck with sub-optimal assignments.
Experiments with the quantization of 128-dimensional SIFT features
show that the ANN algorithm may use one quarter of the comparisons of
Elkan's while retaining a similar solution accuracy.
*/
#include "kmeans.h"
#include "generic.h"
#include "mathop.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING
/** ------------------------------------------------------------------
** @brief Reset state
**
** The function reset the state of the KMeans object. It deletes
** any stored centers, releasing the corresponding memory. This
** cancels the effect of seeding or setting the centers, but
** does not change the other configuration parameters.
**/
VL_EXPORT void
vl_kmeans_reset (VlKMeans * self)
{
  /* Drop any stored centers and inter-center distances, returning the
     object to its unseeded state; configuration parameters are kept. */
  self->numCenters = 0 ;
  self->dimension = 0 ;
  if (self->centers != NULL) {
    vl_free (self->centers) ;
  }
  if (self->centerDistances != NULL) {
    vl_free (self->centerDistances) ;
  }
  self->centers = NULL ;
  self->centerDistances = NULL ;
}
/** ------------------------------------------------------------------
** @brief Create a new KMeans object
** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
** @param distance distance.
** @return new KMeans object instance.
**/
VL_EXPORT VlKMeans *
vl_kmeans_new (vl_type dataType,
               VlVectorComparisonType distance)
{
  /* Zero-initialized allocation, then fill in the defaults. */
  VlKMeans * self = vl_calloc(1, sizeof(VlKMeans)) ;

  /* Data and distance configuration. */
  self->dataType = dataType ;
  self->distance = distance ;

  /* Algorithm defaults: Lloyd, up to 100 iterations, stop when the energy
     changes by less than 1e-4 per iteration, one repetition. */
  self->algorithm = VlKMeansLloyd ;
  self->maxNumIterations = 100 ;
  self->minEnergyVariation = 1e-4 ;
  self->numRepetitions = 1 ;
  self->verbosity = 0 ;

  /* ANN parameters. */
  self->numTrees = 3;
  self->maxNumComparisons = 100;

  /* No centers yet. */
  self->centers = NULL ;
  self->centerDistances = NULL ;
  vl_kmeans_reset (self) ;
  return self ;
}
/** ------------------------------------------------------------------
** @brief Create a new KMeans object by copy
** @param kmeans KMeans object to copy.
** @return new copy.
**/
/** ------------------------------------------------------------------
 ** @brief Create a new KMeans object by copy
 ** @param kmeans KMeans object to copy.
 ** @return new copy.
 **
 ** Fixes vs. original: the copy is zero-initialized (vl_calloc) so fields
 ** not explicitly copied are not garbage, and @c minEnergyVariation —
 ** which ::vl_kmeans_new sets but the original copy omitted — is copied.
 **/
VL_EXPORT VlKMeans *
vl_kmeans_new_copy (VlKMeans const * kmeans)
{
  VlKMeans * self = vl_calloc(1, sizeof(VlKMeans)) ;
  self->algorithm = kmeans->algorithm ;
  self->distance = kmeans->distance ;
  self->dataType = kmeans->dataType ;
  self->verbosity = kmeans->verbosity ;
  self->maxNumIterations = kmeans->maxNumIterations ;
  self->minEnergyVariation = kmeans->minEnergyVariation ;
  self->numRepetitions = kmeans->numRepetitions ;
  self->dimension = kmeans->dimension ;
  self->numCenters = kmeans->numCenters ;
  self->centers = NULL ;
  self->centerDistances = NULL ;
  self->numTrees = kmeans->numTrees;
  self->maxNumComparisons = kmeans->maxNumComparisons;
  /* Deep-copy the centers and cached inter-center distances, if any. */
  if (kmeans->centers) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->dimension * self->numCenters ;
    self->centers = vl_malloc(dataSize) ;
    memcpy (self->centers, kmeans->centers, dataSize) ;
  }
  if (kmeans->centerDistances) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->numCenters * self->numCenters ;
    self->centerDistances = vl_malloc(dataSize) ;
    memcpy (self->centerDistances, kmeans->centerDistances, dataSize) ;
  }
  return self ;
}
/** ------------------------------------------------------------------
** @brief Deletes a KMeans object
** @param self KMeans object instance.
**
** The function deletes the KMeans object instance created
** by ::vl_kmeans_new.
**/
VL_EXPORT void
vl_kmeans_delete (VlKMeans * self)
{
  /* Release owned buffers (centers, centerDistances) first, then the
     object itself. */
  vl_kmeans_reset (self) ;
  vl_free (self) ;
}
/* Helper structure for the shuffle/sort instantiation below: bundles the
   permutation being produced with the (opaque) data array it orders and
   the byte stride between consecutive elements. */
typedef struct _VlKMeansSortWrapper {
  vl_uint32 * permutation ;
  void const * data ;
  vl_size stride ;
} VlKMeansSortWrapper ;
/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */
#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_kmeans
#include "shuffle-def.h"
/* #ifdef VL_KMEANS_INSTANTITATING */
#endif
/* ================================================================ */
#ifdef VL_KMEANS_INSTANTIATING
/* ---------------------------------------------------------------- */
/* Set centers */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_set_centers_, SFX)
(VlKMeans * self,
 TYPE const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  /* Install a copy of the given centers into the KMeans object and
     record the corresponding geometry. The object takes ownership of
     the freshly allocated copy; the input buffer is not retained. */
  vl_size const dataSize = sizeof(TYPE) * dimension * numCenters ;
  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (dataSize) ;
  memcpy (self->centers, centers, dataSize) ;
}
/* ---------------------------------------------------------------- */
/* Random seeding */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_init_centers_with_rand_data_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  /* Seed the centers by picking numCenters distinct data points at
     random. A random permutation of the data is scanned and a point is
     accepted as a center only if it does not duplicate an already
     accepted one (while enough candidates remain).
     NOTE(review): assumes numData >= numCenters — otherwise the scan
     index i can run past the permutation; TODO confirm callers check. */
  vl_uindex i, j, k ;
  VlRand * rand = vl_get_rand () ;
  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;
  {
    vl_uindex * perm = vl_malloc (sizeof(vl_uindex) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
    VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
    VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
    TYPE * distances = vl_malloc (sizeof(TYPE) * numCenters) ;
    /* get a random permutation of the data point */
    for (i = 0 ; i < numData ; ++i) perm[i] = i ;
    _vl_kmeans_shuffle (perm, numData, rand) ;
    /* k counts accepted centers, i walks the permutation; they advance
       independently because rejected duplicates consume an i but no k */
    for (k = 0, i = 0 ; k < numCenters ; ++ i) {
      /* compare the next data point to all centers collected so far
         to detect duplicates (if there are enough left)
      */
      if (numCenters - k < numData - i) {
        vl_bool duplicateDetected = VL_FALSE ;
        VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distances,
                                                              dimension,
                                                              data + dimension * perm[i], 1,
                                                              (TYPE*)self->centers, k,
                                                              distFn) ;
        for (j = 0 ; j < k ; ++j) {
          /* a zero distance means an exact duplicate of center j */
          duplicateDetected |= (distances[j] == 0) ;
        }
        if (duplicateDetected) continue ;
      }
      /* ok, it is not a duplicate so we can accept it! */
      memcpy ((TYPE*)self->centers + dimension * k,
              data + dimension * perm[i],
              sizeof(TYPE) * dimension) ;
      k ++ ;
    }
    vl_free(distances) ;
    vl_free(perm) ;
  }
}
/* ---------------------------------------------------------------- */
/* kmeans++ seeding */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_init_centers_plus_plus_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  /* kmeans++ seeding: the first center is a uniformly random data
     point; each subsequent point is sampled with probability
     proportional to its distance to the closest center chosen so far. */
  vl_uindex x, c ;
  VlRand * rand = vl_get_rand () ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  TYPE * minDistances = vl_malloc (sizeof(TYPE) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;
  for (x = 0 ; x < numData ; ++x) {
    minDistances[x] = (TYPE) VL_INFINITY_D ;
  }
  /* select the first point at random */
  x = vl_rand_uindex (rand, numData) ;
  c = 0 ;
  while (1) {
    TYPE energy = 0 ;
    TYPE acc = 0 ;
    TYPE thresh = (TYPE) vl_rand_real1 (rand) ;
    /* commit data point x (chosen by the previous iteration or the
       initial random draw) as center c */
    memcpy ((TYPE*)self->centers + c * dimension,
            data + x * dimension,
            sizeof(TYPE) * dimension) ;
    c ++ ;
    if (c == numCenters) break ;
    /* distances of all points to the newly committed center */
    VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)
    (distances,
     dimension,
     (TYPE*)self->centers + (c - 1) * dimension, 1,
     data, numData,
     distFn) ;
    for (x = 0 ; x < numData ; ++x) {
      minDistances[x] = VL_MIN(minDistances[x], distances[x]) ;
      energy += minDistances[x] ;
    }
    /* inverse-CDF sampling: the loop leaves x at the chosen point
       (or at numData - 1 if the accumulator never reaches thresh) */
    for (x = 0 ; x < numData - 1 ; ++x) {
      acc += minDistances[x] ;
      if (acc >= thresh * energy) break ;
    }
  }
  vl_free(distances) ;
  vl_free(minDistances) ;
}
/* ---------------------------------------------------------------- */
/* Quantization */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_quantize_, SFX)
(VlKMeans * self,
 vl_uint32 * assignments,
 TYPE * distances,
 TYPE const * data,
 vl_size numData)
{
  /* Exhaustive quantization: assign each data point to its nearest
     center. assignments[i] gets the best center index; distances[i]
     (if the buffer is non-NULL) the corresponding distance.
     NOTE(review): assignments[i] is only written when some center beats
     the infinite initial bound — presumably numCenters > 0; confirm. */
  vl_index i = 0 ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
#ifdef _OPENMP
#pragma omp parallel \
  shared(self, distances, assignments, numData, distFn, data) \
  num_threads(vl_get_max_threads())
#endif
  {
    /* vl_malloc cannot be used here if mapped to MATLAB malloc */
    TYPE * distanceToCenters = malloc(sizeof(TYPE) * self->numCenters) ;
#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0 ; i < (signed)numData ; ++i) {
      vl_uindex k ;
      TYPE bestDistance = (TYPE) VL_INFINITY_D ;
      /* distances of point i to all centers at once */
      VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distanceToCenters,
                                                            self->dimension,
                                                            data + self->dimension * i, 1,
                                                            (TYPE*)self->centers, self->numCenters,
                                                            distFn) ;
      for (k = 0 ; k < self->numCenters ; ++k) {
        if (distanceToCenters[k] < bestDistance) {
          bestDistance = distanceToCenters[k] ;
          assignments[i] = (vl_uint32)k ;
        }
      }
      if (distances) distances[i] = bestDistance ;
    }
    free(distanceToCenters) ;
  }
}
/* ---------------------------------------------------------------- */
/* ANN quantization */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_quantize_ann_, SFX)
(VlKMeans * self,
 vl_uint32 * assignments,
 TYPE * distances,
 TYPE const * data,
 vl_size numData,
 vl_bool update)
{
  /* Approximate quantization: a KD-forest built over the current
     centers answers 1-NN queries for each data point. When update is
     true, an existing assignment is kept if it is closer than the
     (approximate) neighbor returned by the forest, so assignments can
     only improve across iterations. */
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  VlKDForest * forest = vl_kdforest_new(self->dataType,self->dimension,self->numTrees, self->distance) ;
  vl_kdforest_set_max_num_comparisons(forest,self->maxNumComparisons);
  vl_kdforest_set_thresholding_method(forest,VL_KDTREE_MEDIAN);
  vl_kdforest_build(forest,self->numCenters,self->centers);
#ifdef _OPENMP
#pragma omp parallel \
  num_threads(vl_get_max_threads()) \
  shared(self, forest, update, assignments, distances, data, numData, distFn)
#endif
  {
    VlKDForestNeighbor neighbor ;
    VlKDForestSearcher * searcher ;
    vl_index x;
    /* searcher creation mutates the forest, hence the critical section */
#ifdef _OPENMP
#pragma omp critical
#endif
    searcher = vl_kdforest_new_searcher (forest) ;
#ifdef _OPENMP
#pragma omp for
#endif
    for(x = 0 ; x < (signed)numData ; ++x) {
      vl_kdforestsearcher_query (searcher, &neighbor, 1, (TYPE const *) (data + x*self->dimension));
      if (distances) {
        if(!update) {
          distances[x] = (TYPE) neighbor.distance;
          assignments[x] = (vl_uint32) neighbor.index ;
        } else {
          /* re-check the previous assignment exactly; keep it if the
             approximate neighbor is no better */
          TYPE prevDist = (TYPE) distFn(self->dimension,
                                        data + self->dimension * x,
                                        (TYPE*)self->centers + self->dimension *assignments[x]);
          if (prevDist > (TYPE) neighbor.distance) {
            distances[x] = (TYPE) neighbor.distance ;
            assignments[x] = (vl_uint32) neighbor.index ;
          } else {
            distances[x] = prevDist ;
          }
        }
      } else {
        assignments[x] = (vl_uint32) neighbor.index ;
      }
    } /* end for */
  } /* end of parallel region */
  vl_kdforest_delete(forest);
}
/* ---------------------------------------------------------------- */
/* Helper functions */
/* ---------------------------------------------------------------- */
/* The sorting routine is used to find increasing permutation of each
* data dimension. This is used to quickly find the median for l1
* distance clustering. */
VL_INLINE TYPE
VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  /* Compare two data entries through the permutation: negative when the
     entry at indexA is smaller, positive when larger, zero when equal. */
  TYPE * values = (TYPE*)array->data ;
  TYPE valueA = values [array->permutation[indexA] * array->stride] ;
  TYPE valueB = values [array->permutation[indexB] * array->stride] ;
  return valueA - valueB ;
}
VL_INLINE void
VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  /* Exchange two entries of the permutation; the data itself is
     never moved. */
  vl_uint32 * perm = array->permutation ;
  vl_uint32 saved = perm[indexA] ;
  perm[indexA] = perm[indexB] ;
  perm[indexB] = saved ;
}
#define VL_QSORT_prefix VL_XCAT3(_vl_kmeans_, SFX, _qsort)
#define VL_QSORT_array VlKMeansSortWrapper*
#define VL_QSORT_cmp VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
#define VL_QSORT_swap VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
#include "qsort-def.h"
static void
VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)
(VlKMeans * self, vl_uint32 * permutations, TYPE const * data, vl_size numData)
{
  /* For each data dimension, fill one row of `permutations` with the
     indexes of the data points sorted by that dimension's value. Used
     by the L1 center update to locate per-dimension medians quickly. */
  vl_uindex dim, i ;
  for (dim = 0 ; dim < self->dimension ; ++dim) {
    VlKMeansSortWrapper wrapper ;
    wrapper.permutation = permutations + dim * numData ;
    wrapper.data = data + dim ;
    wrapper.stride = self->dimension ;
    /* start from the identity permutation */
    for (i = 0 ; i < numData ; ++i) {
      wrapper.permutation[i] = (vl_uint32)i ;
    }
    VL_XCAT3(_vl_kmeans_, SFX, _qsort_sort)(&wrapper, numData) ;
  }
}
/* ---------------------------------------------------------------- */
/* Lloyd refinement */
/* ---------------------------------------------------------------- */
static double
VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  /* Lloyd refinement: alternate exhaustive quantization and center
     recomputation (mean for L2, per-dimension median for L1) until the
     energy converges, its relative variation drops below
     minEnergyVariation, or maxNumIterations is reached.
     Returns the energy of the final assignment. */
  vl_size c, d, x, iteration ;
  double previousEnergy = VL_INFINITY_D ;
  double initialEnergy = VL_INFINITY_D ;
  double energy ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* FIX: one counter per center suffices (indexed by assignments[x] <
     numCenters); the original allocated numData entries, which
     over-allocates and would be too small if numCenters > numData. */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * self->numCenters) ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  VlRand * rand = vl_get_rand () ;
  vl_size totNumRestartedCenters = 0 ;
  vl_size numRestartedCenters = 0 ;
  if (self->distance == VlDistanceL1) {
    /* L1 medians need per-dimension sorted orderings of the data */
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }
  for (energy = VL_INFINITY_D,
       iteration = 0;
       1 ;
       ++ iteration) {
    /* assign data to clusters */
    VL_XCAT(_vl_kmeans_quantize_, SFX)(self, assignments, distances, data, numData) ;
    /* compute energy */
    energy = 0 ;
    for (x = 0 ; x < numData ; ++x) energy += distances[x] ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: Lloyd iter %d: energy = %g\n", iteration,
                energy) ;
    }
    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because maximum number of iterations reached\n") ;
      }
      break ;
    }
    if (energy == previousEnergy) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because the algorithm fully converged\n") ;
      }
      break ;
    }
    if (iteration == 0) {
      initialEnergy = energy ;
    } else {
      double eps = (previousEnergy - energy) / (initialEnergy - energy) ;
      if (eps < self->minEnergyVariation) {
        if (self->verbosity) {
          /* FIX: message said "ANN" (copy-paste from the ANN variant)
             although this is the Lloyd routine */
          VL_PRINTF("kmeans: Lloyd terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ;
        }
        break ;
      }
    }
    /* begin next iteration */
    previousEnergy = energy ;
    /* update clusters */
    memset(clusterMasses, 0, sizeof(vl_size) * self->numCenters) ;
    for (x = 0 ; x < numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }
    numRestartedCenters = 0 ;
    switch (self->distance) {
      case VlDistanceL2:
        /* L2: each center becomes the mean of its assigned points */
        memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < numData ; ++x) {
          TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* empty cluster: restart the center at a random data point */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* L1: each center coordinate becomes the median of its
           assigned points along that dimension, found by scanning the
           pre-sorted permutation */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              ((TYPE*)self->centers) [d + c * self->dimension] =
              data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
          /* restart the centers as required */
          for (c = 0 ; c < self->numCenters ; ++c) {
            if (clusterMasses[c] == 0) {
              TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
              vl_uindex x = vl_rand_uindex(rand, numData) ;
              numRestartedCenters ++ ;
              for (d = 0 ; d < self->dimension ; ++d) {
                cpt[d] = data[x * self->dimension + d] ;
              }
            }
          }
        }
        break ;
      default:
        abort();
    } /* done compute centers */
    totNumRestartedCenters += numRestartedCenters ;
    if (self->verbosity && numRestartedCenters) {
      VL_PRINTF("kmeans: Lloyd iter %d: restarted %d centers\n", iteration,
                numRestartedCenters) ;
    }
  } /* next Lloyd iteration */
  if (permutations) {
    vl_free(permutations) ;
  }
  if (numSeenSoFar) {
    vl_free(numSeenSoFar) ;
  }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  return energy ;
}
static double
VL_XCAT(_vl_kmeans_update_center_distances_, SFX)
(VlKMeans * self)
{
  /* Recompute the full numCenters x numCenters matrix of pairwise
     center distances, allocating the buffer lazily on first use.
     Returns the number of distinct pairs, i.e. the count of distance
     computations effectively required. */
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  if (self->centerDistances == NULL) {
    vl_size const matrixSize = sizeof(TYPE) * self->numCenters * self->numCenters ;
    self->centerDistances = vl_malloc (matrixSize) ;
  }
  VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(self->centerDistances,
                                                        self->dimension,
                                                        self->centers, self->numCenters,
                                                        NULL, 0,
                                                        distFn) ;
  return self->numCenters * (self->numCenters - 1) / 2 ;
}
static double
VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  /* ANN refinement: identical structure to the Lloyd routine, but the
     assignment step uses an approximate (KD-forest) nearest-neighbor
     query; from iteration 1 onwards the quantizer is told to keep a
     previous assignment when it is better than the approximate one.
     Returns the energy of the final assignment. */
  vl_size c, d, x, iteration ;
  double initialEnergy = VL_INFINITY_D ;
  double previousEnergy = VL_INFINITY_D ;
  double energy ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  VlRand * rand = vl_get_rand () ;
  vl_size totNumRestartedCenters = 0 ;
  vl_size numRestartedCenters = 0 ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* FIX: one counter per center suffices (indexed by assignments[x] <
     numCenters); the original allocated numData entries. */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * self->numCenters) ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  if (self->distance == VlDistanceL1) {
    /* L1 medians need per-dimension sorted orderings of the data */
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }
  for (energy = VL_INFINITY_D,
       iteration = 0;
       1 ;
       ++ iteration) {
    /* assign data to clusters */
    VL_XCAT(_vl_kmeans_quantize_ann_, SFX)(self, assignments, distances, data, numData, iteration > 0) ;
    /* compute energy */
    energy = 0 ;
    for (x = 0 ; x < numData ; ++x) energy += distances[x] ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: ANN iter %d: energy = %g\n", iteration,
                energy) ;
    }
    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: ANN terminating because the maximum number of iterations has been reached\n") ;
      }
      break ;
    }
    if (energy == previousEnergy) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: ANN terminating because the algorithm fully converged\n") ;
      }
      break ;
    }
    if (iteration == 0) {
      initialEnergy = energy ;
    } else {
      double eps = (previousEnergy - energy) / (initialEnergy - energy) ;
      if (eps < self->minEnergyVariation) {
        if (self->verbosity) {
          VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ;
        }
        break ;
      }
    }
    /* begin next iteration */
    previousEnergy = energy ;
    /* update clusters */
    memset(clusterMasses, 0, sizeof(vl_size) * self->numCenters) ;
    for (x = 0 ; x < numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }
    numRestartedCenters = 0 ;
    switch (self->distance) {
      case VlDistanceL2:
        /* L2: each center becomes the mean of its assigned points */
        memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < numData ; ++x) {
          TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* empty cluster: restart the center at a random data point */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* L1: per-dimension medians via the pre-sorted permutations */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              ((TYPE*)self->centers) [d + c * self->dimension] =
              data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
          /* restart the centers as required */
          for (c = 0 ; c < self->numCenters ; ++c) {
            if (clusterMasses[c] == 0) {
              TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
              vl_uindex x = vl_rand_uindex(rand, numData) ;
              numRestartedCenters ++ ;
              for (d = 0 ; d < self->dimension ; ++d) {
                cpt[d] = data[x * self->dimension + d] ;
              }
            }
          }
        }
        break ;
      default:
        VL_PRINT("bad distance set: %d\n",self->distance);
        abort();
    } /* done compute centers */
    totNumRestartedCenters += numRestartedCenters ;
    if (self->verbosity && numRestartedCenters) {
      VL_PRINTF("kmeans: ANN iter %d: restarted %d centers\n", iteration,
                numRestartedCenters) ;
    }
  }
  if (permutations) {
    vl_free(permutations) ;
  }
  if (numSeenSoFar) {
    vl_free(numSeenSoFar) ;
  }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  return energy ;
}
/* ---------------------------------------------------------------- */
/* Elkan refinement */
/* ---------------------------------------------------------------- */
static double
VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  /* Elkan refinement: Lloyd with triangle-inequality pruning. Upper
     bounds (point to its closest center) and lower bounds (point to
     every center) are maintained across iterations so that most
     point-to-center distance computations can be skipped.
     Returns the exact energy of the final assignment. */
  vl_size d, iteration ;
  vl_index x ;
  vl_uint32 c, j ;
  vl_bool allDone ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* FIX: one counter per center suffices (indexed by assignments[x] <
     numCenters); the original allocated numData entries. */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * self->numCenters) ;
  VlRand * rand = vl_get_rand () ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  TYPE * nextCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
  TYPE * pointToClosestCenterUB = vl_malloc (sizeof(TYPE) * numData) ;
  vl_bool * pointToClosestCenterUBIsStrict = vl_malloc (sizeof(vl_bool) * numData) ;
  TYPE * pointToCenterLB = vl_malloc (sizeof(TYPE) * numData * self->numCenters) ;
  TYPE * newCenters = vl_malloc(sizeof(TYPE) * self->dimension * self->numCenters) ;
  TYPE * centerToNewCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  double energy ;
  vl_size totDistanceComputationsToInit = 0 ;
  vl_size totDistanceComputationsToRefreshUB = 0 ;
  vl_size totDistanceComputationsToRefreshLB = 0 ;
  vl_size totDistanceComputationsToRefreshCenterDistances = 0 ;
  vl_size totDistanceComputationsToNewCenters = 0 ;
  vl_size totDistanceComputationsToFinalize = 0 ;
  vl_size totNumRestartedCenters = 0 ;
  if (self->distance == VlDistanceL1) {
    /* L1 medians need per-dimension sorted orderings of the data */
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /*                          Initialization                         */
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /* An iteration is: get_new_centers + reassign + get_energy.
     This counts as iteration 0, where get_new_centers is assumed
     to be performed before calling the train function by
     the initialization function */
  /* update distances between centers */
  totDistanceComputationsToInit +=
  VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;
  /* assign points to the initial centers and initialize bounds */
  memset(pointToCenterLB, 0, sizeof(TYPE) * self->numCenters *  numData) ;
  for (x = 0 ; x < (signed)numData ; ++x) {
    TYPE distance ;
    /* do the first center */
    assignments[x] = 0 ;
    distance = distFn(self->dimension,
                      data + x * self->dimension,
                      (TYPE*)self->centers + 0) ;
    pointToClosestCenterUB[x] = distance ;
    pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
    pointToCenterLB[0 + x * self->numCenters] = distance ;
    totDistanceComputationsToInit += 1 ;
    /* do other centers */
    for (c = 1 ; c < self->numCenters ; ++c) {
      /* Can skip if the center assigned so far is twice as close
         as its distance to the center under consideration.
         (The factor is 2 for L1 and 4 for L2 because the stored L2
         "distances" are squared.) */
      if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
          pointToClosestCenterUB[x] <=
          ((TYPE*)self->centerDistances)
          [c + assignments[x] * self->numCenters]) {
        continue ;
      }
      distance = distFn(self->dimension,
                        data + x * self->dimension,
                        (TYPE*)self->centers + c * self->dimension) ;
      pointToCenterLB[c + x * self->numCenters] = distance ;
      totDistanceComputationsToInit += 1 ;
      if (distance < pointToClosestCenterUB[x]) {
        pointToClosestCenterUB[x] = distance ;
        assignments[x] = c ;
      }
    }
  }
  /* compute UB on energy */
  energy = 0 ;
  for (x = 0 ; x < (signed)numData ; ++x) {
    energy += pointToClosestCenterUB[x] ;
  }
  if (self->verbosity) {
    VL_PRINTF("kmeans: Elkan iter 0: energy = %g, dist. calc. = %d\n",
              energy, totDistanceComputationsToInit) ;
  }
  /* #define SANITY*/
#ifdef SANITY
  {
    int xx ;
    int cc ;
    TYPE tol = 1e-5 ;
    VL_PRINTF("inconsistencies after initial assignments:\n");
    for (xx = 0 ; xx < numData ; ++xx) {
      for (cc = 0 ; cc < self->numCenters ; ++cc) {
        TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
        TYPE b = distFn(self->dimension,
                        data + self->dimension * xx,
                        (TYPE*)self->centers + self->dimension * cc) ;
        if (cc == assignments[xx]) {
          TYPE z = pointToClosestCenterUB[xx] ;
          if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
                                 cc, xx, z, b) ;
        }
        if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f\n",
                               cc, xx, a, b) ;
      }
    }
  }
#endif
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /*                          Iterations                             */
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  for (iteration = 1 ; 1; ++iteration) {
    vl_size numDistanceComputationsToRefreshUB = 0 ;
    vl_size numDistanceComputationsToRefreshLB = 0 ;
    vl_size numDistanceComputationsToRefreshCenterDistances = 0 ;
    vl_size numDistanceComputationsToNewCenters = 0 ;
    vl_size numRestartedCenters = 0 ;
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /*                         Compute new centers                   */
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    memset(clusterMasses, 0, sizeof(vl_size) * self->numCenters) ;
    for (x = 0 ; x < (signed)numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }
    switch (self->distance) {
      case VlDistanceL2:
        /* L2: each center becomes the mean of its assigned points */
        memset(newCenters, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < (signed)numData ; ++x) {
          TYPE * cpt = newCenters + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = newCenters + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* restart the center */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* L1: per-dimension medians via the pre-sorted permutations */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < (signed)numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              newCenters [d + c * self->dimension] =
              data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
        }
        /* restart the centers as required */
        for (c = 0 ; c < self->numCenters ; ++c) {
          if (clusterMasses[c] == 0) {
            TYPE * cpt = newCenters + c * self->dimension ;
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      default:
        abort();
    } /* done compute centers */
    /* compute the distance from the old centers to the new centers */
    for (c = 0 ; c < self->numCenters ; ++c) {
      TYPE distance = distFn(self->dimension,
                             newCenters + c * self->dimension,
                             (TYPE*)self->centers + c * self->dimension) ;
      centerToNewCenterDistances[c] = distance ;
      numDistanceComputationsToNewCenters += 1 ;
    }
    /* make the new centers current (swap buffers) */
    {
      TYPE * tmp = self->centers ;
      self->centers = newCenters ;
      newCenters = tmp ;
    }
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /*                     Reassign points to a centers              */
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /*
     Update distances between centers.
     */
    numDistanceComputationsToRefreshCenterDistances
    += VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;
    /* for each center, distance to the closest other center */
    for (c = 0 ; c < self->numCenters ; ++c) {
      nextCenterDistances[c] = (TYPE) VL_INFINITY_D ;
      for (j = 0 ; j < self->numCenters ; ++j) {
        if (j == c) continue ;
        nextCenterDistances[c] = VL_MIN(nextCenterDistances[c],
                                        ((TYPE*)self->centerDistances)
                                        [j + c * self->numCenters]) ;
      }
    }
    /*
     Update upper bounds on point-to-closest-center distances
     based on the center variation.
     */
    for (x = 0 ; x < (signed)numData ; ++x) {
      TYPE a = pointToClosestCenterUB[x] ;
      TYPE b = centerToNewCenterDistances[assignments[x]] ;
      if (self->distance == VlDistanceL1) {
        pointToClosestCenterUB[x] = a + b ;
      } else {
        /* squared-L2 triangle inequality: (sqrt(a)+sqrt(b))^2 */
#if (FLT == VL_TYPE_FLOAT)
        TYPE sqrtab =  sqrtf (a * b) ;
#else
        TYPE sqrtab =  sqrt (a * b) ;
#endif
        pointToClosestCenterUB[x] = a + b + 2.0 * sqrtab ;
      }
      pointToClosestCenterUBIsStrict[x] = VL_FALSE ;
    }
    /*
     Update lower bounds on point-to-center distances
     based on the center variation.
     */
#if defined(_OPENMP)
#pragma omp parallel for default(shared) private(x,c) num_threads(vl_get_max_threads())
#endif
    for (x = 0 ; x < (signed)numData ; ++x) {
      for (c = 0 ; c < self->numCenters ; ++c) {
        TYPE a = pointToCenterLB[c + x * self->numCenters] ;
        TYPE b = centerToNewCenterDistances[c] ;
        if (a < b) {
          pointToCenterLB[c + x * self->numCenters] = 0 ;
        } else {
          if (self->distance == VlDistanceL1) {
            pointToCenterLB[c + x * self->numCenters]  = a - b ;
          } else {
            /* squared-L2 triangle inequality: (sqrt(a)-sqrt(b))^2 */
#if (FLT == VL_TYPE_FLOAT)
            TYPE sqrtab =  sqrtf (a * b) ;
#else
            TYPE sqrtab =  sqrt (a * b) ;
#endif
            pointToCenterLB[c + x * self->numCenters]  = a + b - 2.0 * sqrtab ;
          }
        }
      }
    }
#ifdef SANITY
    {
      int xx ;
      int cc ;
      TYPE tol = 1e-5 ;
      VL_PRINTF("inconsistencies before assignments:\n");
      for (xx = 0 ; xx < numData ; ++xx) {
        for (cc = 0 ; cc < self->numCenters ; ++cc) {
          TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
          TYPE b = distFn(self->dimension,
                          data + self->dimension * xx,
                          (TYPE*)self->centers + self->dimension * cc) ;
          if (cc == assignments[xx]) {
            TYPE z = pointToClosestCenterUB[xx] ;
            if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
                                   cc, xx, z, b) ;
          }
          if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n",
                                 cc, xx, a, b, assignments[xx]) ;
        }
      }
    }
#endif
    /*
     Scan the data and do the reassignments. Use the bounds to
     skip as many point-to-center distance calculations as possible.
     */
    allDone = VL_TRUE ;
#if defined(_OPENMP)
#pragma omp parallel for \
            shared(self,numData, \
              pointToClosestCenterUB,pointToCenterLB, \
              nextCenterDistances,pointToClosestCenterUBIsStrict, \
              assignments,data,distFn,allDone) \
            private(c,x) \
            reduction(+:numDistanceComputationsToRefreshUB,numDistanceComputationsToRefreshLB) \
            num_threads(vl_get_max_threads())
#endif
    for (x = 0 ; x < (signed)numData ; ++ x) {
      /*
       A point x sticks with its current center assignmets[x]
       the UB to d(x, c[assigmnets[x]]) is not larger than half
       the distance of c[assigments[x]] to any other center c.
       */
      if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
          pointToClosestCenterUB[x] <= nextCenterDistances[assignments[x]]) {
        continue ;
      }
      for (c = 0 ; c < self->numCenters ; ++c) {
        vl_uint32 cx = assignments[x] ;
        TYPE distance ;
        /* The point is not reassigned to a given center c
         if either:
         0 - c is already the assigned center
         1 - The UB of d(x, c[assignments[x]]) is smaller than half
             the distance of c[assigments[x]] to c, OR
         2 - The UB of d(x, c[assignmets[x]]) is smaller than the
             LB of the distance of x to c.
         */
        if (cx == c) {
          continue ;
        }
        if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
            pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
            [c + cx * self->numCenters]) {
          continue ;
        }
        if (pointToClosestCenterUB[x] <= pointToCenterLB
            [c + x * self->numCenters]) {
          continue ;
        }
        /* If the UB is loose, try recomputing it and test again */
        if (! pointToClosestCenterUBIsStrict[x]) {
          distance = distFn(self->dimension,
                            data + self->dimension * x,
                            (TYPE*)self->centers + self->dimension * cx) ;
          pointToClosestCenterUB[x] = distance ;
          pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
          pointToCenterLB[cx + x * self->numCenters] = distance ;
          numDistanceComputationsToRefreshUB += 1 ;
          if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
              pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
              [c + cx * self->numCenters]) {
            continue ;
          }
          if (pointToClosestCenterUB[x] <= pointToCenterLB
              [c + x * self->numCenters]) {
            continue ;
          }
        }
        /*
         Now the UB is strict (equal to d(x, assignments[x])), but
         we still could not exclude that x should be reassigned to
         c. We therefore compute the distance, update the LB,
         and check if a reassigmnet must be made
         */
        distance = distFn(self->dimension,
                          data + x * self->dimension,
                          (TYPE*)self->centers + c *  self->dimension) ;
        numDistanceComputationsToRefreshLB += 1 ;
        pointToCenterLB[c + x * self->numCenters] = distance ;
        if (distance < pointToClosestCenterUB[x]) {
          assignments[x] = c ;
          pointToClosestCenterUB[x] = distance ;
          allDone = VL_FALSE ;
          /* the UB strict flag is already set here */
        }
      } /* assign center */
    } /* next data point */
    totDistanceComputationsToRefreshUB
    += numDistanceComputationsToRefreshUB ;
    totDistanceComputationsToRefreshLB
    += numDistanceComputationsToRefreshLB ;
    totDistanceComputationsToRefreshCenterDistances
    += numDistanceComputationsToRefreshCenterDistances ;
    totDistanceComputationsToNewCenters
    += numDistanceComputationsToNewCenters ;
    totNumRestartedCenters
    += numRestartedCenters ;
#ifdef SANITY
    {
      int xx ;
      int cc ;
      TYPE tol = 1e-5 ;
      VL_PRINTF("inconsistencies after assignments:\n");
      for (xx = 0 ; xx < numData ; ++xx) {
        for (cc = 0 ; cc < self->numCenters ; ++cc) {
          TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
          TYPE b = distFn(self->dimension,
                          data + self->dimension * xx,
                          (TYPE*)self->centers + self->dimension * cc) ;
          if (cc == assignments[xx]) {
            TYPE z = pointToClosestCenterUB[xx] ;
            if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
                                   cc, xx, z, b) ;
          }
          if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n",
                                 cc, xx, a, b, assignments[xx]) ;
        }
      }
    }
#endif
    /* compute UB on energy */
    energy = 0 ;
    for (x = 0 ; x < (signed)numData ; ++x) {
      energy += pointToClosestCenterUB[x] ;
    }
    if (self->verbosity) {
      vl_size numDistanceComputations =
      numDistanceComputationsToRefreshUB +
      numDistanceComputationsToRefreshLB +
      numDistanceComputationsToRefreshCenterDistances +
      numDistanceComputationsToNewCenters ;
      VL_PRINTF("kmeans: Elkan iter %d: energy <= %g, dist. calc. = %d\n",
                iteration,
                energy,
                numDistanceComputations) ;
      if (numRestartedCenters) {
        /* FIX: the original passed a stray `energy` argument here,
           mismatching the two %d conversions in the format string
           (undefined behavior, garbled output) */
        VL_PRINTF("kmeans: Elkan iter %d: restarted %d centers\n",
                  iteration,
                  numRestartedCenters) ;
      }
      if (self->verbosity > 1) {
        VL_PRINTF("kmeans: Elkan iter %d: total dist. calc. per type: "
                  "UB: %.1f%% (%d), LB: %.1f%% (%d), "
                  "intra_center: %.1f%% (%d), "
                  "new_center: %.1f%% (%d)\n",
                  iteration,
                  100.0 * numDistanceComputationsToRefreshUB / numDistanceComputations,
                  numDistanceComputationsToRefreshUB,
                  100.0 *numDistanceComputationsToRefreshLB / numDistanceComputations,
                  numDistanceComputationsToRefreshLB,
                  100.0 * numDistanceComputationsToRefreshCenterDistances / numDistanceComputations,
                  numDistanceComputationsToRefreshCenterDistances,
                  100.0 * numDistanceComputationsToNewCenters / numDistanceComputations,
                  numDistanceComputationsToNewCenters) ;
      }
    }
    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Elkan terminating because maximum number of iterations reached\n") ;
      }
      break ;
    }
    if (allDone) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Elkan terminating because the algorithm fully converged\n") ;
      }
      break ;
    }
  } /* next Elkan iteration */
  /* compute true energy */
  energy = 0 ;
  for (x = 0 ; x < (signed)numData ; ++ x) {
    vl_uindex cx = assignments [x] ;
    energy += distFn(self->dimension,
                     data + self->dimension * x,
                     (TYPE*)self->centers + self->dimension * cx) ;
    totDistanceComputationsToFinalize += 1 ;
  }
  {
    vl_size totDistanceComputations =
    totDistanceComputationsToInit +
    totDistanceComputationsToRefreshUB +
    totDistanceComputationsToRefreshLB +
    totDistanceComputationsToRefreshCenterDistances +
    totDistanceComputationsToNewCenters +
    totDistanceComputationsToFinalize ;
    double saving = (double)totDistanceComputations
    / (iteration * self->numCenters * numData) ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: Elkan: total dist. calc.: %d (%.2f %% of Lloyd)\n",
                totDistanceComputations, saving * 100.0) ;
      if (totNumRestartedCenters) {
        VL_PRINTF("kmeans: Elkan: there have been %d restarts\n",
                  totNumRestartedCenters) ;
      }
    }
    if (self->verbosity > 1) {
      VL_PRINTF("kmeans: Elkan: total dist. calc. per type: "
                "init: %.1f%% (%d), UB: %.1f%% (%d), LB: %.1f%% (%d), "
                "intra_center: %.1f%% (%d), "
                "new_center: %.1f%% (%d), "
                "finalize: %.1f%% (%d)\n",
                100.0 * totDistanceComputationsToInit / totDistanceComputations,
                totDistanceComputationsToInit,
                100.0 * totDistanceComputationsToRefreshUB / totDistanceComputations,
                totDistanceComputationsToRefreshUB,
                100.0 *totDistanceComputationsToRefreshLB / totDistanceComputations,
                totDistanceComputationsToRefreshLB,
                100.0 * totDistanceComputationsToRefreshCenterDistances / totDistanceComputations,
                totDistanceComputationsToRefreshCenterDistances,
                100.0 * totDistanceComputationsToNewCenters / totDistanceComputations,
                totDistanceComputationsToNewCenters,
                100.0 * totDistanceComputationsToFinalize / totDistanceComputations,
                totDistanceComputationsToFinalize) ;
    }
  }
  if (permutations) {
    vl_free(permutations) ;
  }
  if (numSeenSoFar) {
    vl_free(numSeenSoFar) ;
  }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  vl_free(nextCenterDistances) ;
  vl_free(pointToClosestCenterUB) ;
  vl_free(pointToClosestCenterUBIsStrict) ;
  vl_free(pointToCenterLB) ;
  vl_free(newCenters) ;
  vl_free(centerToNewCenterDistances) ;
  return energy ;
}
/* ---------------------------------------------------------------- */
/* ------------------------------------------------------------------
 ** @internal @brief Refine centers using the configured algorithm.
 **
 ** Dispatches to the Lloyd, Elkan, or ANN refinement routine for the
 ** current TYPE instantiation (SFX is f or d).
 **
 ** @param self KMeans object.
 ** @param data data to cluster.
 ** @param numData number of data points.
 ** @return K-means energy at the end of optimization.
 **/

static double
VL_XCAT(_vl_kmeans_refine_centers_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  switch (self->algorithm) {
    case VlKMeansLloyd:
      return
      VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)(self, data, numData) ;
    case VlKMeansElkan:
      return
      VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)(self, data, numData) ;
    case VlKMeansANN:
      return
      VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)(self, data, numData) ;
    default:
      /* unknown algorithm value: programming error */
      abort() ;
  }
}
/* VL_KMEANS_INSTANTIATING */
#else
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#endif
/* VL_KMEANS_INSTANTIATING */
#endif
/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING
/** ------------------------------------------------------------------
** @brief Set centers
** @param self KMeans object.
** @param centers centers to copy.
** @param dimension data dimension.
** @param numCenters number of centers.
**/
VL_EXPORT void
vl_kmeans_set_centers
(VlKMeans * self,
 void const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  /* Discard any previous state, then dispatch on the numeric type. */
  vl_kmeans_reset (self) ;

  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_set_centers_f
    (self, (float const *)centers, dimension, numCenters) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_set_centers_d
    (self, (double const *)centers, dimension, numCenters) ;
  } else {
    /* unsupported data type: programming error */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief init centers by randomly sampling data
** @param self KMeans object.
** @param data data to sample from.
** @param dimension data dimension.
 ** @param numData number of data points.
** @param numCenters number of centers.
**
** The function inits the KMeans centers by randomly sampling
** the data @a data.
**/
VL_EXPORT void
vl_kmeans_init_centers_with_rand_data
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  /* Discard any previous state, then dispatch on the numeric type. */
  vl_kmeans_reset (self) ;

  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_init_centers_with_rand_data_f
    (self, (float const *)data, dimension, numData, numCenters) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_init_centers_with_rand_data_d
    (self, (double const *)data, dimension, numData, numCenters) ;
  } else {
    /* unsupported data type: programming error */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Seed centers by the KMeans++ algorithm
** @param self KMeans object.
** @param data data to sample from.
** @param dimension data dimension.
 ** @param numData number of data points.
** @param numCenters number of centers.
**/
VL_EXPORT void
vl_kmeans_init_centers_plus_plus
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  /* Discard any previous state, then dispatch on the numeric type. */
  vl_kmeans_reset (self) ;

  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_init_centers_plus_plus_f
    (self, (float const *)data, dimension, numData, numCenters) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_init_centers_plus_plus_d
    (self, (double const *)data, dimension, numData, numCenters) ;
  } else {
    /* unsupported data type: programming error */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Quantize data
** @param self KMeans object.
** @param assignments data to closest center assignments (output).
** @param distances data to closest center distance (output).
** @param data data to quantize.
** @param numData number of data points to quantize.
**/
VL_EXPORT void
vl_kmeans_quantize
(VlKMeans * self,
 vl_uint32 * assignments,
 void * distances,
 void const * data,
 vl_size numData)
{
  /* Dispatch on the numeric type of the stored data. */
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_quantize_f
    (self, assignments, distances, (float const *)data, numData) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_quantize_d
    (self, assignments, distances, (double const *)data, numData) ;
  } else {
    /* unsupported data type: programming error */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Quantize data using approximate nearest neighbours (ANN).
** @param self KMeans object.
** @param assignments data to centers assignments (output).
 ** @param distances data to closest center distance (output).
** @param data data to quantize.
** @param numData number of data points.
 ** @param update choose whether to update current assignments.
**
** The function uses an ANN procedure to compute the approximate
** nearest neighbours of the input data point.
**
** Setting @a update to ::VL_TRUE will cause the algorithm
** to *update existing assignments*. This means that each
 ** element of @a assignments and @a distances is updated only if the
 ** ANN procedure can find a better assignment than the existing one.
**/
VL_EXPORT void
vl_kmeans_quantize_ann
(VlKMeans * self,
 vl_uint32 * assignments,
 void * distances,
 void const * data,
 vl_size numData,
 vl_bool update)
{
  /* Dispatch on the numeric type of the stored data. */
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_quantize_ann_f
    (self, assignments, distances, (float const *)data, numData, update) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_quantize_ann_d
    (self, assignments, distances, (double const *)data, numData, update) ;
  } else {
    /* unsupported data type: programming error */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Refine center locations.
** @param self KMeans object.
** @param data data to quantize.
** @param numData number of data points.
** @return K-means energy at the end of optimization.
**
** The function calls the underlying K-means quantization algorithm
** (@ref VlKMeansAlgorithm) to quantize the specified data @a data.
** The function assumes that the cluster centers have already
** been assigned by using one of the seeding functions, or by
** setting them.
**/
VL_EXPORT double
vl_kmeans_refine_centers
(VlKMeans * self,
 void const * data,
 vl_size numData)
{
  /* Centers must have been seeded or set beforehand. */
  assert (self->centers) ;

  /* Dispatch on the numeric type of the stored data. */
  if (self->dataType == VL_TYPE_FLOAT) {
    return _vl_kmeans_refine_centers_f
      (self, (float const *)data, numData) ;
  }
  if (self->dataType == VL_TYPE_DOUBLE) {
    return _vl_kmeans_refine_centers_d
      (self, (double const *)data, numData) ;
  }
  /* unsupported data type: programming error */
  abort() ;
}
/** ------------------------------------------------------------------
** @brief Cluster data.
** @param self KMeans object.
** @param data data to quantize.
** @param dimension data dimension.
** @param numData number of data points.
** @param numCenters number of clusters.
** @return K-means energy at the end of optimization.
**
** The function initializes the centers by using the initialization
** algorithm set by ::vl_kmeans_set_initialization and refines them
** by the quantization algorithm set by ::vl_kmeans_set_algorithm.
** The process is repeated one or more times (see
 ** ::vl_kmeans_set_num_repetitions) and the result with smaller
** energy is retained.
**/
VL_EXPORT double
vl_kmeans_cluster (VlKMeans * self,
                   void const * data,
                   vl_size dimension,
                   vl_size numData,
                   vl_size numCenters)
{
  vl_uindex repetition ;
  double bestEnergy = VL_INFINITY_D ;
  void * bestCenters = NULL ;

  /* Run the full seed+refine pipeline numRepetitions times and keep
     the centers of the lowest-energy run. */
  for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
    double energy ;
    double timeRef ;

    if (self->verbosity) {
      VL_PRINTF("kmeans: repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
    }

    /* Seed the centers with the configured initialization method. */
    timeRef = vl_get_cpu_time() ;
    switch (self->initialization) {
      case VlKMeansRandomSelection :
        vl_kmeans_init_centers_with_rand_data (self,
                                               data, dimension, numData,
                                               numCenters) ;
        break ;
      case VlKMeansPlusPlus :
        vl_kmeans_init_centers_plus_plus (self,
                                          data, dimension, numData,
                                          numCenters) ;
        break ;
      default:
        /* unknown initialization method: programming error */
        abort() ;
    }

    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means initialized in %.2f s\n",
                vl_get_cpu_time() - timeRef) ;
    }

    /* Refine the seeded centers and measure the resulting energy. */
    timeRef = vl_get_cpu_time () ;
    energy = vl_kmeans_refine_centers (self, data, numData) ;

    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means terminated in %.2f s with energy %g\n",
                vl_get_cpu_time() - timeRef, energy) ;
    }

    /* copy centers to output if current solution is optimal */
    /* check repetition == 0 as well in case energy = NaN, which */
    /* can happen if the data contain NaNs */
    if (energy < bestEnergy || repetition == 0) {
      void * temp ;
      bestEnergy = energy ;

      /* lazily allocate the holding buffer on the first improvement */
      if (bestCenters == NULL) {
        bestCenters = vl_malloc(vl_get_type_size(self->dataType) *
                                self->dimension *
                                self->numCenters) ;
      }

      /* swap buffers: the current centers become the best-so-far, and
         the old best buffer is recycled as scratch for the next run */
      temp = bestCenters ;
      bestCenters = self->centers ;
      self->centers = temp ;
    } /* better energy */
  } /* next repetition */

  /* install the best centers as the object's centers and free the
     scratch buffer currently held in self->centers */
  vl_free (self->centers) ;
  self->centers = bestCenters ;
  return bestEnergy ;
}
/* VL_KMEANS_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_KMEANS_INSTANTIATING
|
ques14.c | #include<stdio.h>
#include<omp.h>
#include<math.h>
/*
 * Sieve of Eratosthenes: print all primes in [2, n].
 * The inner marking loop is parallelized with OpenMP; the iterations
 * write disjoint-or-identical values (arr[j] = 1), so they are safe
 * to run concurrently.
 *
 * Fixes over the original:
 *  - explicit `int main(void)` (implicit int is invalid in modern C)
 *  - scanf result checked and n bounded to the capacity of arr[1000],
 *    which the original would silently overflow for n >= 1000
 *  - loop condition `i * i <= n` avoids repeated floating-point sqrt
 */
int main(void)
{
    int n, i, j;
    int arr[1000] = {0};        /* arr[i] == 0  =>  i not yet marked composite */

    printf("Enter n\n");
    if (scanf("%d", &n) != 1 || n < 0 || n > 999) {
        /* reject input that would index past the end of arr[] */
        fprintf(stderr, "n must be an integer in [0, 999]\n");
        return 1;
    }

    for (i = 2; i * i <= n; i++) {
        if (arr[i] == 0) {      /* i is prime: mark its multiples */
            #pragma omp parallel for
            for (j = i * i; j <= n; j = j + i)
                arr[j] = 1;
        }
    }

    /* everything left unmarked is prime */
    for (i = 2; i <= n; i++)
        if (arr[i] == 0)
            printf("%d\n", i);
    return 0;
}
|
GB_binop__lor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lor_uint32
// A.*B function (eWiseMult): GB_AemultB__lor_uint32
// A*D function (colscale): GB_AxD__lor_uint32
// D*A function (rowscale): GB_DxB__lor_uint32
// C+=B function (dense accum): GB_Cdense_accumB__lor_uint32
// C+=b function (dense accum): GB_Cdense_accumb__lor_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_uint32
// C=scalar+B GB_bind1st__lor_uint32
// C=scalar+B' GB_bind1st_tran__lor_uint32
// C=A+scalar GB_bind2nd__lor_uint32
// C=A'+scalar GB_bind2nd_tran__lor_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) || (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_UINT32 || GxB_NO_LOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body is supplied
// by the included template, specialized via the GB_* macros above.
GrB_Info GB_Cdense_ewise3_noaccum__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense one.  The slice arrays
// partition B's entries into ntasks units of parallel work.
GrB_Info GB_Cdense_accumB__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB_Cdense_accumb__lor_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar, as untyped bytes
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns.
    // Harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    // typed view of C's values, written by the included template
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    // typed view of C's values, written by the included template
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B.  The slice workspaces are allocated by
// the included template and released by GB_FREE_ALL (defined above).
GrB_Info GB_AaddB__lor_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    // slice workspaces, populated on demand inside the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B.  The slice workspaces are allocated by
// the included template and released by GB_FREE_ALL (defined above).
GrB_Info GB_AemultB__lor_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    // slice workspaces, populated on demand inside the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x || Bx [p]) for every entry present in the bitmap Bb.
GrB_Info GB_bind1st__lor_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped inputs and output
    uint32_t *vals_out = (uint32_t *) Cx_output ;
    uint32_t scalar = (*((uint32_t *) x_input)) ;
    uint32_t *vals_in = (uint32_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions not present in the bitmap
        if (GBB (Bb, k))
        {
            uint32_t bval = vals_in [k] ;
            vals_out [k] = ((scalar != 0) || (bval != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] || y) for every entry present in the bitmap Ab.
GrB_Info GB_bind2nd__lor_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t k ;
    // typed views of the untyped inputs and output
    uint32_t *vals_out = (uint32_t *) Cx_output ;
    uint32_t *vals_in = (uint32_t *) Ax_input ;
    uint32_t scalar = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions not present in the bitmap
        if (GBB (Ab, k))
        {
            uint32_t aval = vals_in [k] ;
            vals_out [k] = ((aval != 0) || (scalar != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A while applying z = (x || aij) via GB_CAST_OP.
GrB_Info GB_bind1st_tran__lor_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A while applying z = (aij || y) via GB_CAST_OP.
GrB_Info GB_bind2nd_tran__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
hello.c | #include <stdio.h>
#include <omp.h>
/* Spawn a fixed team of six OpenMP threads; each one announces its id
 * and the team size. */
int main(int argc, char **argv)
{
#pragma omp parallel num_threads(6)
    {
        const int tid = omp_get_thread_num();
        const int team = omp_get_num_threads();
        printf("Hello, multithreaded world: thread %d of %d\n", tid, team);
    }
    return 0;
}
|
batchnorm_arm_func.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef BATCHNORM_OP
#pragma once
#include <cmath>
#include "operators/op_param.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif // __ARM_NEON__
namespace paddle_mobile {
namespace operators {
// Batch normalization (inference): for each channel c,
//   y = gamma/sqrt(var+eps) * x + (beta - gamma/sqrt(var+eps) * mean)
// folded here into a single affine transform y = scale * x + bias so the
// inner loop is one multiply-add per element.  Input/output layout is
// NCHW (assumed from dims()[0..3] usage — TODO confirm).
template <typename P>
void BatchnormCompute(const BatchNormParam<CPU> &param) {
  const float epsilon = param.Epsilon();
  const float *mean_ptr = param.InputMean()->data<float>();
  const float *variance_ptr = param.InputVariance()->data<float>();
  const float *scale_ptr = param.InputScale()->data<float>();
  const float *bias_ptr = param.InputBias()->data<float>();

  const framework::Tensor *input = param.InputX();
  const float *input_ptr = input->data<float>();
  framework::Tensor *output = param.OutputY();
  float *output_ptr = output->mutable_data<float>();
  size_t spatial_size = output->dims()[2] * output->dims()[3];
  int channels = output->dims()[1];

  // each (batch, channel) plane is independent, so collapse both loops
  #pragma omp parallel for collapse(2)
  for (int batch = 0; batch < output->dims()[0]; ++batch) {
    for (int c = 0; c < channels; ++c) {
      // fold mean/var/gamma/beta into one per-channel affine transform
      float inv_scale = 1.f / (std::sqrt(variance_ptr[c] + epsilon));
      float bias = bias_ptr[c] - inv_scale * scale_ptr[c] * mean_ptr[c];
      float scale = inv_scale * scale_ptr[c];
      size_t offset = (batch * channels + c) * spatial_size;
      const float *x = input_ptr + offset;
      float *y = output_ptr + offset;
      size_t remain = spatial_size;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
      // NEON path: 16 floats per iteration; x and y advance as we go,
      // so the scalar tail below continues from where this loop stopped
      int loop = spatial_size >> 4;
      remain = spatial_size & 0xF;
      float32x4_t __scale = vdupq_n_f32(scale);
      float32x4_t __bias = vdupq_n_f32(bias);
      for (int k = 0; k < loop; ++k, x += 16, y += 16) {
        float32x4_t r0 = vld1q_f32(x);
        float32x4_t r1 = vld1q_f32(x + 4);
        float32x4_t r2 = vld1q_f32(x + 8);
        float32x4_t r3 = vld1q_f32(x + 12);
        r0 = vmlaq_f32(__bias, __scale, r0);
        r1 = vmlaq_f32(__bias, __scale, r1);
        r2 = vmlaq_f32(__bias, __scale, r2);
        r3 = vmlaq_f32(__bias, __scale, r3);
        vst1q_f32(y, r0);
        vst1q_f32(y + 4, r1);
        vst1q_f32(y + 8, r2);
        vst1q_f32(y + 12, r3);
      }
#endif  // __ARM_NEON__
      // scalar tail (or the whole plane when NEON is unavailable)
      for (int k = 0; k < remain; ++k) {
        y[k] = scale * x[k] + bias;
      }
    }
  }
}
} // namespace operators
} // namespace paddle_mobile
#endif
|
3_helloworld3.c | // Conditional compilation facility (_OPENMP) demo
#include <stdio.h>
#include <omp.h>
int main(int argc, char** argv) {
    /* Shared across all threads of the parallel region below. */
    int thread_num = 0;
    printf("Hello World :)\n");
#pragma omp parallel
    {
#if _OPENMP
        /* Compiled only when OpenMP is enabled (_OPENMP defined). */
        thread_num = omp_get_thread_num();
#endif
        /* NOTE(review): thread_num is shared, so this read races with the
           writes above — the printed id may belong to another thread.
           Presumably intentional, to demonstrate the data race. */
        printf("Hello World from thread %d\n",thread_num);
    }
    /* Prints whatever value the last racing write left behind. */
    printf("E agora??? %d\n", thread_num);
    return 0;
}
|
GB_binop__lor_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lor_fp64
// A.*B function (eWiseMult): GB_AemultB__lor_fp64
// A*D function (colscale): GB_AxD__lor_fp64
// D*A function (rowscale): GB_DxB__lor_fp64
// C+=B function (dense accum): GB_Cdense_accumB__lor_fp64
// C+=b function (dense accum): GB_Cdense_accumb__lor_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_fp64
// C=scalar+B GB_bind1st__lor_fp64
// C=scalar+B' GB_bind1st_tran__lor_fp64
// C=A+scalar GB_bind2nd__lor_fp64
// C=A'+scalar GB_bind2nd_tran__lor_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) || (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_FP64 || GxB_NO_LOR_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body is supplied
// by the included template, specialized via the GB_* macros above.
GrB_Info GB_Cdense_ewise3_noaccum__lor_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense one.  The slice arrays
// partition B's entries into ntasks units of parallel work.
GrB_Info GB_Cdense_accumB__lor_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB_Cdense_accumb__lor_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar, as untyped bytes
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns.
    // Harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__lor_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    // typed view of C's values, written by the included template
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__lor_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    // typed view of C's values, written by the included template
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B; the loop body comes from the template.
GrB_Info GB_AaddB__lor_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the LOR (fp64) operator; numeric
// work is in the included emult template, driven by this file's GB_* macros.
GrB_Info GB_AemultB__lor_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x LOR Bx [p]) for all p: apply the LOR (fp64) operator with its
// first argument bound to the scalar x.
GrB_Info GB_bind1st__lor_fp64
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arrays, and the bound scalar
    double *Cx = (double *) Cx_output ;
    const double *Bx = (const double *) Bx_input ;
    const double xval = (*((const double *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // logical-or on fp64: nonzero is "true", result is 1.0 or 0.0
        Cx [p] = ((xval != 0) || (Bx [p] != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] LOR y) for all p: apply the LOR (fp64) operator with its
// second argument bound to the scalar y.
GrB_Info GB_bind2nd__lor_fp64
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arrays, and the bound scalar
    double *Cx = (double *) Cx_output ;
    const double *Ax = (const double *) Ax_input ;
    const double yval = (*((const double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // logical-or on fp64: nonzero is "true", result is 1.0 or 0.0
        Cx [p] = ((Ax [p] != 0) || (yval != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes z = x LOR aij
// (x is the bound scalar, aij comes from A).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LOR with the first argument bound.
GrB_Info GB_bind1st_tran__lor_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (auto-generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes z = aij LOR y
// (y is the bound scalar, aij comes from A).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LOR with the second argument bound.
GrB_Info GB_bind2nd_tran__lor_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_int32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_int16
// op(A') function: GB_tran__abs_int32_int16
// C type: int32_t
// A type: int16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IABS ((int32_t) Ax [p]) for all p: typecast int16 to int32,
// then take the integer absolute value, in parallel via the GB_* macros.
GrB_Info GB_unop__abs_int32_int16
(
int32_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// disabled at compile time; caller uses the generic apply
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: cast Ax [p] to int32, then Cx [p] = GB_IABS of that
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): bucket transpose of A with the cast+abs applied during
// phase 2 of the included transpose template.
GrB_Info GB_tran__abs_int32_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_int16
// op(A') function: GB_tran__minv_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_UNSIGNED ((uint8_t) Ax [p], 8) for all p: typecast int16
// to uint8, then take the 8-bit unsigned multiplicative inverse.
GrB_Info GB_unop__minv_uint8_int16
(
uint8_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: cast Ax [p] to uint8, then apply GB_IMINV_UNSIGNED
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): bucket transpose of A with the cast+minv applied during
// phase 2 of the included transpose template.
GrB_Info GB_tran__minv_uint8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
core_clauum.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlauum.c, normal z -> c, Fri Sep 28 17:38:22 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_lauum
*
* Computes the product U * U^H or L^H * L, where the triangular
* factor U or L is stored in the upper or lower triangular part of
* the array A.
*
* If uplo = 'U' or 'u' then the upper triangle of the result is stored,
* overwriting the factor U in A.
* If uplo = 'L' or 'l' then the lower triangle of the result is stored,
* overwriting the factor L in A.
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the triangular factor U or L.
* On exit, if uplo = 'U', the upper triangle of A is
* overwritten with the upper triangle of the product U * U^H;
* if uplo = 'L', the lower triangle of A is overwritten with
* the lower triangle of the product L^H * L.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[out] info
* - 0 on successful exit
* - < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
// Thin wrapper over LAPACKE: computes U*U^H (uplo=Upper) or L^H*L
// (uplo=Lower) in place in A.  Declared weak so a tuned implementation can
// override it at link time.  Returns the LAPACKE info code (0 on success).
__attribute__((weak))
int plasma_core_clauum(plasma_enum_t uplo,
int n,
plasma_complex32_t *A, int lda)
{
return LAPACKE_clauum_work(LAPACK_COL_MAJOR,
lapack_const(uplo), n, A, lda);
}
/******************************************************************************/
// OpenMP-task version of clauum: enqueues the work as a task with an inout
// dependence on all of A, so it is ordered after producers/consumers of A.
void plasma_core_omp_clauum(plasma_enum_t uplo,
int n,
plasma_complex32_t *A, int lda,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(inout:A[0:lda*n])
{
// skip the work if an earlier task in this sequence already failed
if (sequence->status == PlasmaSuccess) {
int info = plasma_core_clauum(uplo, n, A, lda);
if (info != PlasmaSuccess) {
// record the failure on the sequence so later tasks become no-ops
plasma_coreblas_error("core_clauum() failed");
plasma_request_fail(sequence, request, PlasmaErrorInternal);
}
}
}
}
|
GB_unaryop__lnot_uint32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_bool
// op(A') function: GB_tran__lnot_uint32_bool
// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !(((uint32_t) Ax [p]) != 0) for all p: typecast bool to uint32,
// then apply logical-not, in parallel via the GB_* macros.
GrB_Info GB_unop__lnot_uint32_bool
(
uint32_t *Cx, // Cx and Ax may be aliased
bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// expands to: cast Ax [p] to uint32, then Cx [p] = !(z != 0)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): bucket transpose of A with the cast+lnot applied during
// phase 2 of the included transpose template.
GrB_Info GB_tran__lnot_uint32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
}
#endif
|
linAlgWeightedNorm2.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
extern "C"
void weightedNorm2(const dlong & Nblocks, const dlong & N,
const dfloat * __restrict__ cpu_w,
const dfloat * __restrict__ cpu_a,
dfloat * __restrict__ cpu_wa){
dfloat wa2 = 0;
#pragma omp parallel for reduction(+:wa2)
for(int i=0;i<N;++i){
const dfloat ai = cpu_a[i];
const dfloat wi = cpu_w[i];
wa2 += ai*ai*wi;
}
cpu_wa[0] = wa2;
}
extern "C"
void weightedNorm2Many(const dlong & Nblocks, const dlong & N,
const dlong & Nfields,
const dlong & offset,
const dfloat * __restrict__ cpu_w,
const dfloat * __restrict__ cpu_a,
dfloat * __restrict__ cpu_wa){
dfloat wa2 = 0;
#pragma omp parallel for collapse(2) reduction(+:wa2)
for(int fld=0;fld<Nfields;fld++) {
for(int i=0;i<N;++i){
const dlong id = i + fld*offset;
const dfloat ai = cpu_a[id];
const dfloat wi = cpu_w[i];
wa2 += ai*ai*wi;
}
}
cpu_wa[0] = wa2;
} |
GB_binop__islt_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_fp64)
// A*D function (colscale): GB (_AxD__islt_fp64)
// D*A function (rowscale): GB (_DxB__islt_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_fp64)
// C=scalar+B GB (_bind1st__islt_fp64)
// C=scalar+B' GB (_bind1st_tran__islt_fp64)
// C=A+scalar GB (_bind2nd__islt_fp64)
// C=A'+scalar GB (_bind2nd_tran__islt_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_FP64 || GxB_NO_ISLT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the ISLT (fp64) operator, where all three matrices are dense;
// the loop lives in the included template.
void GB (_Cdense_ewise3_noaccum__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C, using the ISLT (fp64)
// operator as the accumulator (subassign method 23 template).
GrB_Info GB (_Cdense_accumB__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C, using the ISLT (fp64)
// operator as the accumulator (subassign method 22 template).
GrB_Info GB (_Cdense_accumb__islt_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (auto-generated boilerplate): the return above always fires
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D with the ISLT
// (fp64) multiply; the loop lives in the included colscale template.
GrB_Info GB (_AxD__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D with the ISLT (fp64)
// multiply; the loop lives in the included rowscale template.
GrB_Info GB (_DxB__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the ISLT (fp64) operator.
// For GxB_eWiseUnion, alpha/beta supply the values used where only one of
// A or B has an entry; they are only read when is_eWiseUnion is true.
GrB_Info GB (_AaddB__islt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A and B; freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper,
// with the ISLT (fp64) operator; the loop lives in the included meta file.
GrB_Info GB (_AemultB_08__islt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, with the ISLT (fp64) operator.  GB_BINOP_FLIP selects at
// compile time whether a runtime flipxy case must be handled.
GrB_Info GB (_AemultB_02__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full, with the ISLT (fp64) operator.
GrB_Info GB (_AemultB_04__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is bitmap, with the ISLT
// (fp64) operator; the loop lives in the included bitmap emult template.
GrB_Info GB (_AemultB_bitmap__islt_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for all entries present in the bitmap Bb: apply the
// ISLT (fp64) operator with its first argument bound to the scalar x.
GrB_Info GB (_bind1st__islt_fp64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    const double *Bx = (const double *) Bx_input ;
    const double xval = (*((const double *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, p))
        {
            Cx [p] = (xval < GBX (Bx, p, false)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for all entries present in the bitmap Ab: apply the
// ISLT (fp64) operator with its second argument bound to the scalar y.
GrB_Info GB (_bind2nd__islt_fp64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    const double *Ax = (const double *) Ax_input ;
    const double yval = (*((const double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, p))
        {
            Cx [p] = (GBX (Ax, p, false) < yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes z = (x < aij),
// where x is the bound scalar and aij comes from A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply ISLT with the first argument bound.
GrB_Info GB (_bind1st_tran__islt_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (auto-generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes z = (aij < y),
// where y is the bound scalar and aij comes from A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply ISLT with the second argument bound.
GrB_Info GB (_bind2nd_tran__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cost.c | double cost_Zsamp(double **Z, int sh){
int i, j, k, l;
double log_like = 0.0, dist;
#pragma omp parallel for private(i, j, k, l, dist) default(shared)
for(k = 1; k <= nITEM; k++){
for(i = 2; i <= ncount[sh]; i++)
for(j = 1; j < i; j++){
for(dist = 0.0, l = 1; l <= nDIM; l++) dist += pow((Z[i][l] - Z[j][l]), 2.0);
dist = sqrt(dist);
if(SCHOOL[sh].Y[k][i][j] == 1) log_like += -log(1.0 + exp(-(SCHOOL[sh].oldbeta[k] - dist)));
else log_like += -log(1.0 + exp(SCHOOL[sh].oldbeta[k] - dist));
}
}
return log_like;
}
// Log-likelihood of the item latent positions Z for school sh, summed over
// all respondents k and all item pairs (i > j), under the logistic distance
// model P(U=1) = 1 / (1 + exp(-(theta_k - ||Z_i - Z_j||))).
// Reads globals: ncount, nITEM, nDIM, SCHOOL.
double cost_Zitem(double **Z, int sh){
    int i, j, k, l;
    double log_like = 0.0, dist;
    // BUG FIX: log_like must be a reduction variable; the original pragma
    // let every thread update the shared accumulator concurrently (race).
    #pragma omp parallel for private(i, j, k, l, dist) default(shared) reduction(+:log_like)
    for(k = 1; k <= ncount[sh]; k++){
        for(i = 2; i <= nITEM; i++)
            for(j = 1; j < i; j++){
                // Euclidean distance between item positions i and j
                for(dist = 0.0, l = 1; l <= nDIM; l++) dist += pow((Z[i][l] - Z[j][l]), 2.0);
                dist = sqrt(dist);
                if(SCHOOL[sh].U[k][i][j] == 1) log_like += -log(1.0 + exp(-(SCHOOL[sh].oldtheta[k] - dist)));
                else log_like += -log(1.0 + exp(SCHOOL[sh].oldtheta[k] - dist));
            }
    }
    return log_like;
}
// Log-likelihood of a proposed easiness `beta` for one item in school sh,
// summed over all respondent pairs (i > j) using the stored positions
// old_Zsamp.  Reads globals: ncount, nDIM, SCHOOL.
double cost_beta(int item, double beta, int sh){
int i, j, l;
double log_like = 0.0, dist;
// NOTE(review): the pragma is deliberately disabled; re-enabling it as-is
// would race on log_like (it would also need reduction(+:log_like)).
//#pragma omp parallel for private(i, j, l, dist) default(shared)
for(i = 2; i <= ncount[sh]; i++)
for(j = 1; j < i; j++){
// Euclidean distance between stored latent positions i and j
for(dist = 0.0, l = 1; l <= nDIM; l++) dist += pow((SCHOOL[sh].old_Zsamp[i][l] - SCHOOL[sh].old_Zsamp[j][l]), 2.0);
dist = sqrt(dist);
// log Bernoulli likelihood under P(Y=1) = logistic(beta - dist)
if(SCHOOL[sh].Y[item][i][j] == 1) log_like += -log(1 + exp(-(beta - dist)));
else log_like += -log(1 + exp(beta - dist));
}
return log_like;
}
// Log-likelihood of a proposed ability `theta` for one respondent in school
// sh, summed over all item pairs (i > j) using the stored item positions
// old_Zitem.  Reads globals: nITEM, nDIM, SCHOOL.
double cost_theta(int sample, double theta, int sh){
int i, j, l;
double log_like = 0.0, dist;
// NOTE(review): the pragma is deliberately disabled; re-enabling it as-is
// would race on log_like (it would also need reduction(+:log_like)).
//#pragma omp parallel for private(i, j, l, dist) default(shared)
for(i = 2; i <= nITEM; i++)
for(j = 1; j < i; j++){
// Euclidean distance between stored item positions i and j
for(dist = 0.0, l = 1; l <= nDIM; l++) dist += pow((SCHOOL[sh].old_Zitem[i][l] - SCHOOL[sh].old_Zitem[j][l]), 2.0);
dist = sqrt(dist);
// log Bernoulli likelihood under P(U=1) = logistic(theta - dist)
if(SCHOOL[sh].U[sample][i][j] == 1) log_like += -log(1 + exp(-(theta - dist)));
else log_like += -log(1 + exp(theta - dist));
}
return log_like;
}
|
matvec_int_avx2.c | //matvec.c
//Multiplies a matrix by a vector
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 1200
// read timer in second
// Wall-clock time in seconds with millisecond resolution (via ftime).
double read_timer() {
    struct timeb now;
    ftime(&now);
    return (double) now.time + ((double) now.millitm) * 1e-3;
}
//Create a matrix and a vector and fill with random numbers
// Fill the N x N matrix and the N-vector with pseudo-random values in
// [0, 10].  Preserves the original rand() call order: one full matrix row,
// then the corresponding vector entry.
void init(int **matrix, int *vector) {
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            matrix[row][col] = (int)rand()/(int)(RAND_MAX/10.0);
        }
        vector[row] = (int)rand()/(int)(RAND_MAX/10.0);
    }
}
//Our sum function- what it does is pretty straight-forward.
// Element-wise product dest[i][j] = matrix[i][j] * vector[j], with the inner
// loop marked for SIMD vectorization.
// NOTE: despite the name, no reduction is performed; the previous accumulator
// `s` was assigned but never used and has been removed.
void sum(int **matrix, int *vector, int **dest) {
    for (int i = 0; i<N; i++) {
#pragma omp simd simdlen(8)
        for (int j = 0; j<N; j++) {
            dest[i][j] = matrix[i][j] * vector[j];
        }
    }
}
// Debug functions
// Reference (scalar) implementation: dest[i][j] = matrix[i][j] * vector[j].
// Used to validate the SIMD version in sum().
void serial(int **matrix, int *vector, int **dest) {
    for (int row = 0; row < N; ++row) {
        int *src = matrix[row];
        int *out = dest[row];
        for (int col = 0; col < N; ++col) {
            out[col] = src[col] * vector[col];
        }
    }
}
// Print the top-left 8x8 corner of the matrix, one bracketed row per line,
// followed by a blank line.
void print_matrix(int **matrix) {
    for (int r = 0; r < 8; ++r) {
        printf("[");
        for (int c = 0; c < 8; ++c) {
            printf("%d ", matrix[r][c]);
        }
        puts("]");
    }
    puts("");
}
// Print the first 8 entries of the vector on a single bracketed line.
void print_vector(int *vector) {
    printf("[");
    for (int idx = 0; idx < 8; ++idx) {
        printf("%d ", vector[idx]);
    }
    puts("]");
}
// Compare two N x N matrices; returns 0 iff all elements are equal.
// Fix: sum absolute differences. The original summed signed differences,
// so mismatches of opposite sign (e.g. +1 and -1) cancelled and could
// report two different matrices as identical.
int check(int **A, int **B){
    int difference = 0;
    for(int i = 0;i<N; i++){
        for (int j = 0; j<N; j++)
        { difference += abs(A[i][j]- B[i][j]);}
    }
    return difference;
}
// Benchmark driver: times the SIMD kernel against the serial reference,
// prints sample output, a GFLOPS estimate, and a correctness check.
int main(int argc, char **argv) {
    // Allocate the matrices (array-of-row-pointers layout) and the vector.
    int **dest_matrix = malloc(sizeof(int*)*N);
    int **serial_matrix = malloc(sizeof(int*)*N);
    int **matrix = malloc(sizeof(int*)*N);
    int *vector = malloc(sizeof(int)*N);
    for (int i = 0; i<N; i++) {
        dest_matrix[i] = malloc(sizeof(int)*N);
        serial_matrix[i] = malloc(sizeof(int)*N);
        matrix[i] = malloc(sizeof(int)*N);
    }
    srand(time(NULL));
    init(matrix, vector);
    // Time the SIMD version.
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        sum(matrix, vector, dest_matrix);
    double t = (read_timer() - start);
    // Time the serial reference.
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        serial(matrix, vector, serial_matrix);
    double t_serial = (read_timer() - start_serial);
    print_matrix(matrix);
    print_vector(vector);
    puts("=\n");
    print_matrix(dest_matrix);
    puts("---------------------------------");
    print_matrix(serial_matrix);
    // NOTE(review): 2*N*N flops/run assumes a multiply + add per element, but
    // the kernels only multiply — confirm the intended metric.
    double gflops = ((2.0 * N) * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = ((2.0 * N) * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Matrix-vector (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("Matrix-vector (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %d\n", check(dest_matrix,serial_matrix));
    // Fix: free every row before the row-pointer arrays — the original freed
    // only the pointer arrays and leaked all N rows of each matrix.
    for (int i = 0; i<N; i++) {
        free(dest_matrix[i]);
        free(serial_matrix[i]);
        free(matrix[i]);
    }
    free(dest_matrix);
    free(serial_matrix);
    free(matrix);
    free(vector);
    return 0;
}
|
calcMuAB2.c | #include <mex.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
/* Return the smaller of two ints. */
int min(int A, int B) {
    return (A < B) ? A : B;
}
/* Return the larger of two ints. */
int max(int A, int B) {
    return (A > B) ? A : B;
}
/* MEX entry point: builds the single-precision matrix mu_a_b2 of size
 * [numRows x (numBounds-1)*numColumnsPred].
 *
 * prhs inputs:
 *   0: mu              - double array of means (indexed with numColumnsShape stride)
 *   1: prec            - double array of precisions, same indexing as mu
 *   2: numRows         - scalar, rows of the output
 *   3: numBounds       - scalar; numBounds-1 blocks of columns are produced
 *   4: numColumnsShape - scalar, column stride into mu/prec
 *   5: numColumnsPred  - scalar, number of predicted columns
 *   6: colA            - indices of the first source column per predicted column
 *   7: colAFac         - double weights applied to the colA terms
 *   8: colB            - indices of the second source column
 *   9: colBFac         - double weights applied to the colB terms
 *  10: boundsPred      - [2 x numColumnsPred] inclusive row bounds per column
 *
 * NOTE(review): colA/colB/boundsPred are read via mxGetData cast to int* —
 * this assumes the caller created them as int32 arrays; confirm in MATLAB.
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    /* Input variables */
    double *mu = mxGetPr(prhs[0]);
    double *prec = mxGetPr(prhs[1]);
    int numRows = (int) mxGetScalar(prhs[2]);
    int numBounds = (int) mxGetScalar(prhs[3]);
    int numColumnsShape = (int) mxGetScalar(prhs[4]);
    int numColumnsPred = (int) mxGetScalar(prhs[5]);
    int *colA = (int*) mxGetData(prhs[6]);
    double *colAFac = mxGetPr(prhs[7]);
    int *colB = (int*) mxGetData(prhs[8]);
    double *colBFac = mxGetPr(prhs[9]);
    int *boundsPred = (int*) mxGetData(prhs[10]);
    /* intern variables and pointers */
    float* mu_a_b2 = NULL;
    float factor1,factor2,muMean;
    int i,j,k,idx,idxA,idxB;
    /* 2-D matrix with [numBounds,numColumnsPred] */
    plhs[0] = mxCreateNumericMatrix(numRows,(numBounds-1)*numColumnsPred,mxSINGLE_CLASS,mxREAL);
    mu_a_b2 = (float *) mxGetPr(plhs[0]);
    /* negative entropy of q_c */
    /* j (the parallel loop index) is implicitly private; everything written
     * inside the loop is listed in the private clause, so iterations over
     * distinct predicted columns touch disjoint output ranges. */
    #pragma omp parallel for private(k,i,idx,factor1,factor2,muMean,idxA,idxB)
    for (j=0; j < numColumnsPred; j++) {
        for (k=0; k < numBounds-1; k++) {
            /* Weighted combination of the two source columns for bound k. */
            idxA = colA[j]+k*numColumnsShape; idxB = colB[j]+k*numColumnsShape;
            muMean = (float) (colAFac[j]*mu[idxA+numColumnsShape] + colBFac[j]*mu[idxB+numColumnsShape]);
            factor1 = (float) (colAFac[j]*prec[idxA] + colBFac[j]*prec[idxB]);
            factor2 = (float) (colAFac[j]*mu[idxA]*prec[idxA] + colBFac[j]*mu[idxB]*prec[idxB]);
            /* Column-major offset of block (k, j) in the output. */
            idx = (k*numColumnsPred + j)*numRows;
            /* Only the rows inside this column's bounds are filled; the rest
             * stay zero from mxCreateNumericMatrix. */
            for (i=boundsPred[j*2]; i <= boundsPred[j*2+1]; i++) {
                mu_a_b2[idx + i] = muMean - (factor1*(i+1) - factor2);
            }
        }
    }
}
|
volumeramdistancetransform.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2016-2020 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#pragma once
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/util/indexmapper.h>
#include <inviwo/core/datastructures/volume/volume.h>
#include <inviwo/core/datastructures/volume/volumeramprecision.h>
#ifdef IVW_USE_OPENMP
#include <omp.h>
#endif
namespace inviwo {
namespace util {
/**
* Implementation of Euclidean Distance Transform according to Saito's algorithm:
* T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations
* of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11).
* pp. 1551-1565, 1994.
* http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf
*
* Calculates the distance in grid index space
* * Predicate is a function of type (const T &value) -> bool to deside if a value in the input
* is a "feature".
* * ValueTransform is a function of type (const U& squaredDist) -> U that is appiled to all
* squared distance values at the end of the calculation.
* * ProcessCallback is a function of type (double progress) -> void that is called with a value
* from 0 to 1 to indicate the progress of the calculation.
*/
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
VolumeRAMPrecision<U>* outDistanceField, const Matrix<3, U> basis,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename T, typename U>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
VolumeRAMPrecision<U>* outDistanceField, const Matrix<3, U> basis,
const size3_t upsample);
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename U, typename ProgressCallback>
void volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
const size3_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale, ProgressCallback callback);
template <typename U>
void volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
const size3_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale);
} // namespace util
// Three-pass separable Euclidean distance transform (Saito's algorithm, see
// the declaration's doxygen comment). Pass 1 scans rows along x; passes 2 and
// 3 refine along y and z. Distances are kept squared until valueTransform is
// applied at the end.
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
                                      VolumeRAMPrecision<U>* outDistanceField,
                                      const Matrix<3, U> basis, const size3_t upsample,
                                      Predicate predicate, ValueTransform valueTransform,
                                      ProgressCallback callback) {
#ifdef IVW_USE_OPENMP
    // NOTE(review): std::thread::hardware_concurrency needs <thread>, which
    // this header does not include directly — confirm a transitive include.
    omp_set_num_threads(std::thread::hardware_concurrency());
#endif
    using int64 = glm::int64;
    auto square = [](auto a) { return a * a; };
    callback(0.0);
    const T* src = inVolume->getDataTyped();
    U* dst = outDistanceField->getDataTyped();
    const i64vec3 srcDim{inVolume->getDimensions()};
    const i64vec3 dstDim{outDistanceField->getDimensions()};
    const i64vec3 sm{upsample};
    // Per-axis squared voxel sizes derived from the basis diagonal; distances
    // below are accumulated in these physical units.
    const auto squareBasis = glm::transpose(basis) * basis;
    const Vector<3, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1], squareBasis[2][2]};
    const Vector<3, U> squareVoxelSize{squareBasisDiag / Vector<3, U>{dstDim * dstDim}};
    const Vector<3, U> invSquareVoxelSize{Vector<3, U>{1.0f} / squareVoxelSize};
    {
        // The separable scheme assumes an orthogonal basis; warn otherwise.
        const auto maxdist = glm::compMax(squareBasisDiag);
        bool orthogonal = true;
        for (size_t i = 0; i < squareBasis.length(); i++) {
            for (size_t j = 0; j < squareBasis.length(); j++) {
                if (i != j) {
                    if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
                        orthogonal = false;
                        break;
                    }
                }
            }
        }
        if (!orthogonal) {
            LogWarnCustom(
                "volumeRAMDistanceTransform",
                "Calculating the distance transform on a non-orthogonal volume will not give "
                "correct values");
        }
    }
    if (srcDim * sm != dstDim) {
        throw Exception(
            "DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
            " dst = " + toString(dstDim) + " scaling = " + toString(sm),
            IVW_CONTEXT_CUSTOM("volumeRAMDistanceTransform"));
    }
    util::IndexMapper<3, int64> srcInd(srcDim);
    util::IndexMapper<3, int64> dstInd(dstDim);
    // Maps a destination voxel back to its (possibly coarser) source voxel.
    auto is_feature = [&](const int64 x, const int64 y, const int64 z) {
        return predicate(src[srcInd(x / sm.x, y / sm.y, z / sm.z)]);
    };
    // first pass, forward and backward scan along x
    // result: min distance in x direction
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
    for (int64 z = 0; z < dstDim.z; ++z) {
        for (int64 y = 0; y < dstDim.y; ++y) {
            // forward
            U dist = static_cast<U>(dstDim.x);  // "infinity": larger than any run
            for (int64 x = 0; x < dstDim.x; ++x) {
                if (!is_feature(x, y, z)) {
                    ++dist;
                } else {
                    dist = U(0);
                }
                dst[dstInd(x, y, z)] = squareVoxelSize.x * square(dist);
            }
            // backward
            dist = static_cast<U>(dstDim.x);
            for (int64 x = dstDim.x - 1; x >= 0; --x) {
                if (!is_feature(x, y, z)) {
                    ++dist;
                } else {
                    dist = U(0);
                }
                dst[dstInd(x, y, z)] =
                    std::min<U>(dst[dstInd(x, y, z)], squareVoxelSize.x * square(dist));
            }
        }
    }
    // second pass, scan y direction
    // for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
    // result: min distance in x and y direction
    callback(0.3);
#ifdef IVW_USE_OPENMP
#pragma omp parallel
#endif
    {
        // Per-thread column buffer so reads are not disturbed by in-place writes.
        std::vector<U> buff;
        buff.resize(dstDim.y);
#ifdef IVW_USE_OPENMP
#pragma omp for
#endif
        for (int64 z = 0; z < dstDim.z; ++z) {
            for (int64 x = 0; x < dstDim.x; ++x) {
                // cache column data into temporary buffer
                for (int64 y = 0; y < dstDim.y; ++y) {
                    buff[y] = dst[dstInd(x, y, z)];
                }
                for (int64 y = 0; y < dstDim.y; ++y) {
                    auto d = buff[y];
                    if (d != U(0)) {
                        // Only offsets within rMax can improve the minimum.
                        const auto rMax =
                            static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
                        const auto rStart = std::min(rMax, y - 1);
                        const auto rEnd = std::min(rMax, dstDim.y - y);
                        for (int64 n = -rStart; n < rEnd; ++n) {
                            const auto w = buff[y + n] + squareVoxelSize.y * square(n);
                            if (w < d) d = w;
                        }
                    }
                    dst[dstInd(x, y, z)] = d;
                }
            }
        }
    }
    // third pass, scan z direction
    // for each voxel v(x,y,z) find min_i(data(x,y,i) + (z - i)^2), 0 <= i < dimZ
    // result: min distance in x and y direction
    callback(0.6);
#ifdef IVW_USE_OPENMP
#pragma omp parallel
#endif
    {
        std::vector<U> buff;
        buff.resize(dstDim.z);
#ifdef IVW_USE_OPENMP
#pragma omp for
#endif
        for (int64 y = 0; y < dstDim.y; ++y) {
            for (int64 x = 0; x < dstDim.x; ++x) {
                // cache column data into temporary buffer
                for (int64 z = 0; z < dstDim.z; ++z) {
                    buff[z] = dst[dstInd(x, y, z)];
                }
                for (int64 z = 0; z < dstDim.z; ++z) {
                    auto d = buff[z];
                    if (d != U(0)) {
                        const auto rMax =
                            static_cast<int64>(std::sqrt(d * invSquareVoxelSize.z)) + 1;
                        const auto rStart = std::min(rMax, z - 1);
                        const auto rEnd = std::min(rMax, dstDim.z - z);
                        for (int64 n = -rStart; n < rEnd; ++n) {
                            const auto w = buff[z + n] + squareVoxelSize.z * square(n);
                            if (w < d) d = w;
                        }
                    }
                    dst[dstInd(x, y, z)] = d;
                }
            }
        }
    }
    // scale data
    // Apply the caller's transform (e.g. sqrt, scaling) to every squared distance.
    callback(0.9);
    const int64 volSize = dstDim.x * dstDim.y * dstDim.z;
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
    for (int64 i = 0; i < volSize; ++i) {
        dst[i] = valueTransform(dst[i]);
    }
    callback(1.0);
}
// Convenience overload with default behavior: voxels whose normalized value
// exceeds 0.5 are features, squared distances are square-rooted at the end,
// and progress reporting is a no-op.
template <typename T, typename U>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T>* inVolume,
                                      VolumeRAMPrecision<U>* outDistanceField,
                                      const Matrix<3, U> basis, const size3_t upsample) {
    util::volumeRAMDistanceTransform(
        inVolume, outDistanceField, basis, upsample,
        [](const T& val) { return util::glm_convert_normalized<double>(val) > 0.5; },
        [](const U& squareDist) {
            return static_cast<U>(std::sqrt(static_cast<double>(squareDist)));
        },
        [](double) {});  // parameter unnamed: avoids an unused-parameter warning
}
// Resolve the volume's RAM representation, dispatch on its concrete scalar
// type, and run the distance transform with the caller-supplied predicate,
// value transform, and progress callback.
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
                                   const size3_t upsample, Predicate predicate,
                                   ValueTransform valueTransform, ProgressCallback callback) {
    const auto ramRep = inVolume->getRepresentation<VolumeRAM>();
    ramRep->dispatch<void, dispatching::filter::Scalars>([&](const auto typedRam) {
        volumeRAMDistanceTransform(typedRam, outDistanceField, inVolume->getBasis(), upsample,
                                   predicate, valueTransform, callback);
    });
}
// Flag-driven overload: translates (normalize, flip, square) into one of four
// feature predicates and one of two value transforms, then runs the distance
// transform. Each lambda keeps its own concrete type, so the selection is
// spelled out as nested branches routed through a single `run` helper.
template <typename U, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
                                   const size3_t upsample, double threshold, bool normalize,
                                   bool flip, bool square, double scale,
                                   ProgressCallback progress) {
    const auto ramRep = inVolume->getRepresentation<VolumeRAM>();
    ramRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) {
        using ValueType = util::PrecisionValueType<decltype(vrprecision)>;
        // Raw-value predicates (flip selects "inside" vs "outside").
        const auto predicateIn = [threshold](const ValueType& val) { return val < threshold; };
        const auto predicateOut = [threshold](const ValueType& val) { return val > threshold; };
        // Normalized-value predicates.
        const auto normPredicateIn = [threshold](const ValueType& val) {
            return util::glm_convert_normalized<double>(val) < threshold;
        };
        const auto normPredicateOut = [threshold](const ValueType& val) {
            return util::glm_convert_normalized<double>(val) > threshold;
        };
        // Value transforms: keep squared distances or take the square root.
        const auto valTransIdent = [scale](const float& squareDist) {
            return static_cast<float>(scale * squareDist);
        };
        const auto valTransSqrt = [scale](const float& squareDist) {
            return static_cast<float>(scale * std::sqrt(squareDist));
        };
        // Single call site for all eight flag combinations.
        const auto run = [&](const auto& pred, const auto& trans) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, pred, trans, progress);
        };
        if (normalize) {
            if (flip) {
                if (square) run(normPredicateIn, valTransIdent);
                else run(normPredicateIn, valTransSqrt);
            } else {
                if (square) run(normPredicateOut, valTransIdent);
                else run(normPredicateOut, valTransSqrt);
            }
        } else {
            if (flip) {
                if (square) run(predicateIn, valTransIdent);
                else run(predicateIn, valTransSqrt);
            } else {
                if (square) run(predicateOut, valTransIdent);
                else run(predicateOut, valTransSqrt);
            }
        }
    });
}
// Flag-driven overload without progress reporting: forwards to the callback
// variant with a callback that ignores all updates.
template <typename U>
void util::volumeDistanceTransform(const Volume* inVolume, VolumeRAMPrecision<U>* outDistanceField,
                                   const size3_t upsample, double threshold, bool normalize,
                                   bool flip, bool square, double scale) {
    const auto ignoreProgress = [](double) {};
    util::volumeDistanceTransform(inVolume, outDistanceField, upsample, threshold, normalize, flip,
                                  square, scale, ignoreProgress);
}
} // namespace inviwo
|
copyprivate-clause.c | #include <stdio.h>
#include <omp.h>
// Demonstrates OpenMP `single` + `copyprivate`: one thread of the team reads a
// value, and copyprivate broadcasts that thread's private `a` to every other
// thread before the following worksharing loop uses it.
// Fixes: explicit `int` return type (implicit int is invalid since C99) and a
// checked scanf so `a` is never used uninitialized on bad input.
int main() {
    int n = 9, i, b[n];
    for (i = 0; i < n; i++)
        b[i] = -1;
#pragma omp parallel
    {
        int a;
        /* Only one thread executes the single block; copyprivate then copies
           its value of `a` into every other thread's private copy. */
#pragma omp single copyprivate(a)
        {
            printf("\nIntroduce valor de inicialización a: ");
            if (scanf("%d", &a) != 1) a = 0;  /* default on invalid input */
            printf("\nSingle ejecutada por el thread %d\n", omp_get_thread_num());
        }
        /* i is the worksharing-loop index, so it is implicitly private here. */
#pragma omp for
        for (i = 0; i < n; i++)
            b[i] = a;
    }
    printf("Después de la región parallel:\n");  /* typo fixed: "Depués" */
    for (i = 0; i < n; i++)
        printf("b[%d] = %d\t", i, b[i]);
    printf("\n");
    return 0;
}
|
17_blur_parallel_best.c | #include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <omp.h>
#define NX 1002
#define NY 1002
// Iteratively blur the interior of an szx-by-szy image in place: each interior
// pixel becomes the rounded average of its four neighbours. Border pixels are
// left untouched.
//
// image: flattened row-major image, element (ix, iy) at image[iy + ix*szy]
// szx, szy: image dimensions
// iters: number of blur iterations
//
// Fix: the initial copy used the NX*NY macros instead of the szx/szy
// parameters, which reads/writes out of bounds (or copies too little) for any
// call with non-default dimensions.
void blur(int *image, size_t szx, size_t szy, size_t iters){
    int *temp = (int*)malloc(sizeof(int) * szx * szy);
    if (temp == NULL) return;  // allocation failed: leave the image unchanged
    for (size_t i = 0; i < szx * szy; ++i) temp[i] = image[i];
    #pragma omp parallel
    {
        for (size_t iit = 0; iit < iters; ++iit){
            #pragma omp for
            for (size_t ix = 1; ix < szx-1; ++ix){
                for (size_t iy = 1; iy < szy-1; ++iy){
                    temp[iy + ix * szy] = (int)(0.25 * (float)(image[iy + (ix+1) * szy] +
                        image[iy + (ix-1) * szy] + image[(iy-1) + ix * szy] +
                        image[(iy+1) + ix * szy]) + 0.5);
                }
            }
            // The implicit barrier of the loop above ensures temp is fully
            // updated before it is copied back into image.
            #pragma omp for
            for (size_t i = 0; i < (szx * szy); ++i){
                image[i] = temp[i];
            }
        }
    }
    free(temp);
}
// Driver: blurs an NX x NY image 10000 times and reports the wall time.
int main(){
    // Fix: NX*NY ints (~4 MB) as an automatic array can overflow the typical
    // 1-8 MB thread stack; static storage keeps the image off the stack.
    static int image[(NX)*(NY)];
    struct timespec t1, t2;
    float dtime;
    for (size_t i = 0; i< NX*NY; ++i) image[i]=5;
    printf("OpenMP code running on %i threads\n",omp_get_max_threads());
    clock_gettime(CLOCK_REALTIME, &t1);
    blur(image,NX,NY, 10000);
    clock_gettime(CLOCK_REALTIME, &t2);
    dtime = (float)(t2.tv_sec - t1.tv_sec) + ((float)(t2.tv_nsec - t1.tv_nsec)
        /1.0e9);
    printf("Time taken was %f seconds\n",dtime);
    printf("Arbitrary value from image %i\n",image[100]);
    printf("Arbitrary value printed to avoid compiler optimising the blur out\n");
    return 0;
}
|
GB_binop__bclr_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__bclr_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__bclr_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint64)
// C=scalar+B GB (_bind1st__bclr_uint64)
// C=scalar+B' GB (_bind1st_tran__bclr_uint64)
// C=A+scalar GB (_bind2nd__bclr_uint64)
// C=A'+scalar GB (_bind2nd_tran__bclr_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint64_t, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, uint64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_UINT64 || GxB_NO_BCLR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the loop body comes from the
// included template, specialized via the GB_* macros above for BITCLR/uint64.
GrB_Info GB (_Cdense_ewise3_noaccum__bclr_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // Operator compiled out via GB_control.h; caller falls back to generic code.
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a (sliced) sparse matrix B into the dense matrix C,
// using the BITCLR/uint64 operator as the accumulator.
GrB_Info GB (_Cdense_accumB__bclr_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // Operator compiled out via GB_control.h; caller falls back to generic code.
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed as untyped bytes) into the dense
// matrix C with the BITCLR/uint64 operator.
GrB_Info GB (_Cdense_accumb__bclr_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: generated-code artifact (the inner block already returned)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (union pattern), specialized for
// z = GB_BITCLR (x, y, uint64_t, 64). Work is pre-partitioned by the caller
// into TaskList; the ek_slicing workspaces are declared here for the template.
GrB_Info GB (_AaddB__bclr_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (intersection pattern), specialized for
// z = GB_BITCLR (x, y, uint64_t, 64).
GrB_Info GB (_AemultB_01__bclr_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 1 for BITCLR (non-commutative, no flipped variant), so
// flipxy selects between fmult(x,y) and fmult(y,x) template instantiations.
GrB_Info GB (_AemultB_02__bclr_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; M's slicing drives the parallel tasks.
GrB_Info GB (_AemultB_03__bclr_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__bclr_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as the first
// argument: Cx [p] = GB_BITCLR (x, Bx [p]). Bb is B's bitmap (may be NULL);
// entries not present in B are skipped.
GrB_Info GB (_bind1st__bclr_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITCLR (x, bij, uint64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as the second
// argument: Cx [p] = GB_BITCLR (Ax [p], y). Ab is A's bitmap (may be NULL);
// entries not present in A are skipped.
GrB_Info GB (_bind2nd__bclr_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITCLR (aij, y, uint64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, uint64_t, 64) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound as
// the first argument; the per-entry work is the GB_CAST_OP macro defined just
// above, expanded inside GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__bclr_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, uint64_t, 64) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound as
// the second argument; the per-entry work is the GB_CAST_OP macro defined just
// above, expanded inside GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__bclr_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Utils.h | /*******************************************************************************
# ____ __ __ _ _____ _ _ #
# / __ \ \ \ / / | | / ____| | | | #
# | | | |_ __ ___ _ __ \ /\ / /__| |__ | | __| | ___ | |__ ___ #
# | | | | '_ \ / _ \ '_ \ \/ \/ / _ \ '_ \| | |_ | |/ _ \| '_ \ / _ \ #
# | |__| | |_) | __/ | | \ /\ / __/ |_) | |__| | | (_) | |_) | __/ #
# \____/| .__/ \___|_| |_|\/ \/ \___|_.__/ \_____|_|\___/|_.__/ \___| #
# | | #
# |_| #
# #
# (c) 2011 by #
# University of Applied Sciences Northwestern Switzerland #
# Institute of Geomatics Engineering #
# martin.christen@fhnw.ch #
********************************************************************************
* Licensed under MIT License. Read the file LICENSE for more information *
*******************************************************************************/
/******************************************************************************/
/* A collection of useful MPI stuff */
/******************************************************************************/
#ifndef _MPI_UTILS_H
#define _MPI_UTILS_H
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <ctime>
#include <iostream>
#include <list>
#include <stack>
#include <utility>
#include <vector>
// High Performance job Manager: Distribute workload asynchronously.
// Rank 0 owns the job stack and ships packets of at most _nMaxWorkSize jobs
// to the other ranks with non-blocking sends; workers pull packets until the
// terminate message (tag 88) arrives and process each packet with OpenMP.
// NOTE: SJob is transmitted as raw bytes (MPI_BYTE), so it must be trivially
// copyable and must not contain pointers into rank-local memory.
template<class SJob>
class MPIJobManager
{
public:
    typedef void (*CallBack_Process)(const SJob& job, int rank);

    // nMaxWorkSize: upper bound on the number of jobs per packet.
    MPIJobManager(int nMaxWorkSize)
    {
        _nMaxWorkSize = nMaxWorkSize;
        MPI_Comm_size(MPI_COMM_WORLD, &_totalnodes);
        MPI_Comm_rank(MPI_COMM_WORLD, &_rank);
#ifdef _OPENMP
        _nMaxthreads = omp_get_max_threads();
#else
        _nMaxthreads = 1;
#endif
    }

    virtual ~MPIJobManager(){}

    // Returns true on the master rank, which owns the job stack.
    bool IsRoot() { return (_rank == 0);}

    // Replace the job stack (effective on the master rank only).
    void AddJobStack(const std::stack<SJob>& js) { if (_rank == 0) _jobstack = js;}

    // Push a vector of jobs onto the stack (master rank only).
    void AddJobs(const std::vector<SJob>& js)
    {
        if (_rank == 0)
        {
            // size_t index avoids the signed/unsigned comparison warning of
            // the previous "int i < js.size()" loop.
            for (size_t i = 0; i < js.size(); i++)
            {
                _jobstack.push(js[i]);
            }
        }
    }

    // Push a single job (master rank only).
    void AddJob(const SJob& j)
    {
        if (_rank == 0)
        {
            _jobstack.push(j);
        }
    }

    //---------------------------------------------------------------------------
    // Start processing. For each job the callback is invoked, possibly from
    // several OpenMP threads at once, so fnc must be thread safe.
    void Process(CallBack_Process fnc, bool bVerbose=false)
    {
        if (_rank == 0 && _totalnodes == 1)
        {
            // Single-node run: process everything locally.
            // BUGFIX: the previous version built only ONE packet of at most
            // _nMaxWorkSize jobs and silently dropped the rest of the stack;
            // now the stack is drained packet by packet.
            while (_jobstack.size() > 0)
            {
                std::vector<SJob> vecJobsRoot = _MakeRootJobPacket(_jobstack);
                std::cout << "-->>Rank " << _rank << " is processing " << vecJobsRoot.size()<< " jobs....!\n"<< std::flush;
#ifndef _DEBUG
#pragma omp parallel for
#endif
                for (int i=0;i<(int)vecJobsRoot.size();i++)
                {
                    fnc(vecJobsRoot[i], _rank);
                }
                std::cout << "<<--Rank " << _rank << " finished processing " << vecJobsRoot.size()<< " jobs!\n"<< std::flush;
            }
        }
        else if (_rank == 0)
        {
            int totaljobs = (int)_jobstack.size();
            if (bVerbose)
            {
                std::cout << "Jobmanager is starting...\n";
                std::cout << "Total jobs: " << totaljobs << "\n" << std::flush;
            }
            // Hand an initial packet to every worker rank.
            for (int i=1;i<_totalnodes;i++)
            {
                _MakeJobPacket(_jobstack, i);
            }
            if (bVerbose)
            {
                std::cout << " Prepared " << (totaljobs-_jobstack.size()) << " jobs of "<< totaljobs << " for external nodes...\n";
            }
            // Poll the outstanding sends; whenever one completes and jobs
            // remain, refill that worker with the next packet.
            while (_lstActiveRequests.size()>0)
            {
                if (bVerbose)
                {
                    std::cout << " Remaining requests: " << _lstActiveRequests.size() << "\n";
                }
                std::vector<int> freenodes;
                std::list< std::pair<MPI_Request, int> >::iterator it = _lstActiveRequests.begin();
                if (it != _lstActiveRequests.end())
                {
                    MPI_Request& req = it->first;
                    int flag;
                    MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
                    if (flag)
                    {
                        int targetnode = it->second;
                        it = _lstActiveRequests.erase(it);
                        if (_jobstack.size()>0)
                        {
                            freenodes.push_back(targetnode);
                        }
                    }
                }
                for (size_t i=0;i<freenodes.size();i++)
                {
                    _MakeJobPacket(_jobstack, freenodes[i]);
                }
            }
            // All non-blocking sends have completed, so their payload buffers
            // may now be released (see _SendJobs).
            _lstSendBuffers.clear();
            // Tell every worker there is no more work.
            for (int i=1;i<_totalnodes;i++)
            {
                _SendTerminate(i);
            }
        }
        else
        {
            // Worker rank: pull packets until the terminate signal arrives.
            std::vector<SJob> vecJobs;
            while (_ReceiveJobs(vecJobs))
            {
                std::cout << "-->>Rank " << _rank << " is processing " << vecJobs.size()<< " jobs....!\n"<< std::flush;
#pragma omp parallel for
                for (int i=0;i<(int)vecJobs.size();i++)
                {
                    fnc(vecJobs[i], _rank);
                }
                std::cout << "<<--Rank " << _rank << " finished processing " << vecJobs.size()<< " jobs!\n"<< std::flush;
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    //---------------------------------------------------------------------------
protected:
    //---------------------------------------------------------------------------
    // Member variables
    //---------------------------------------------------------------------------
    int _totalnodes;    // communicator size
    int _rank;          // rank of this process
    int _nMaxWorkSize;  // maximum number of jobs per packet
    int _nMaxthreads;   // OpenMP thread budget (informational)
    std::stack<SJob> _jobstack;                                  // pending jobs (rank 0 only)
    std::list< std::pair<MPI_Request, int> > _lstActiveRequests; // in-flight sends: (request, target rank)
    std::list< std::vector<SJob> > _lstSendBuffers;              // keeps Isend payloads alive until completion
    //---------------------------------------------------------------------------
    // private methods
    //---------------------------------------------------------------------------
    // Ship vecJobs to `target` with a non-blocking send. On return vecJobs
    // has been emptied (its storage is taken over by _lstSendBuffers).
    // BUGFIX: MPI_Isend only *starts* the transfer, so the payload must stay
    // valid until the request completes. The previous version sent from a
    // caller-local vector that was destroyed immediately (use after free);
    // the packet is now moved into _lstSendBuffers, which Process() clears
    // once every request has finished.
    void _SendJobs(std::vector<SJob>& vecJobs, int target)
    {
        std::cout << " ..Sending "<< vecJobs.size()<<" jobs to rank " << target << "\n"<< std::flush;
        int count = (int)(vecJobs.size() * sizeof(SJob));
        if (count > 0)
        {
            _lstSendBuffers.push_back(std::vector<SJob>());
            _lstSendBuffers.back().swap(vecJobs); // take ownership, no copy
            void* adr = (void*) &(_lstSendBuffers.back()[0]);
            MPI_Request request;
            MPI_Isend (adr, count, MPI_BYTE, target, 77, MPI_COMM_WORLD, &request);
            _lstActiveRequests.push_back(std::pair<MPI_Request, int>(request, target));
        }
    }
    //---------------------------------------------------------------------------
    // Send the terminate message (tag 88) to `target`.
    void _SendTerminate(int target)
    {
        unsigned char data = 88;
        MPI_Send(&data, 1, MPI_BYTE, target, 88, MPI_COMM_WORLD);
    }
    //---------------------------------------------------------------------------
    // Blocking-poll for the next packet from rank 0; returns false once the
    // terminate message (tag 88) arrives instead of a job packet (tag 77).
    bool _ReceiveJobs(std::vector<SJob>& vecJobs)
    {
        vecJobs.clear();
        MPI_Request request;
        MPI_Status status;
        int msglen;
        while (true)
        {
            int flag = 0;
            MPI_Iprobe(0, 88, MPI_COMM_WORLD, &flag, &status);
            if (flag) // "terminate"
            {
                std::cout << " ..Rank "<< _rank <<" received TERMINATE signal\n" << std::flush;
                char buffer;
                MPI_Irecv (&buffer, 1, MPI_BYTE, 0, 88, MPI_COMM_WORLD, &request);
                MPI_Wait(&request, &status);
                return false;
            }
            MPI_Iprobe(0, 77, MPI_COMM_WORLD, &flag, &status);
            if (flag)
            {
                MPI_Get_count(&status, MPI_BYTE, &msglen);
                vecJobs.resize(msglen / sizeof(SJob));
                void* adr = (void*) &(vecJobs[0]);
                MPI_Irecv (adr, msglen, MPI_BYTE, 0, 77, MPI_COMM_WORLD, &request);
                MPI_Wait(&request, &status);
                std::cout << " ..Rank "<< _rank <<" received "<< vecJobs.size() << " Jobs\n" << std::flush;
                return true;
            }
        }
    }
    //---------------------------------------------------------------------------
    // Pop up to _nMaxWorkSize jobs off `jobs` and send them to rank i.
    void _MakeJobPacket( std::stack<SJob> &jobs, int i)
    {
        std::vector<SJob> vJobs;
        for (int w=0; w<_nMaxWorkSize && jobs.size()>0; w++)
        {
            vJobs.push_back(jobs.top());
            jobs.pop();
        }
        if (vJobs.size()>0)
        {
            _SendJobs(vJobs, i);
        }
    }
    //---------------------------------------------------------------------------
    // Pop up to _nMaxWorkSize jobs for local processing on rank 0.
    std::vector<SJob> _MakeRootJobPacket(std::stack<SJob> &jobs)
    {
        std::vector<SJob> vJobs;
        for (int w=0; w<_nMaxWorkSize && jobs.size()>0; w++)
        {
            vJobs.push_back(jobs.top());
            jobs.pop();
        }
        return vJobs;
    }
private:
    MPIJobManager(){}
    MPIJobManager(const MPIJobManager&){}
};
#endif |
for-7.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
extern void bar(int);
/* Compile-only test body: an ordered static worksharing loop. The dg-final
   scans below this function assert that ompexp lowers it to exactly one
   GOMP_loop_ordered_static_start and one GOMP_loop_ordered_static_next call,
   so the loop shape must not be changed. */
void foo (int n)
{
int i;
#pragma omp for schedule(static) ordered
for (i = 0; i < n; ++i)
bar(i);
}
/* { dg-final { scan-tree-dump-times "GOMP_loop_ordered_static_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_ordered_static_next" 1 "ompexp" } } */
|
kcommittee.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "ompdist/vector.h"
#include "ompdist/graph.h"
#include "ompdist/graph_gen.h"
#include "ompdist/queues.h"
#include "ompdist/utils.h"
#include "ompdist/msr.h"
#include "config.h"
typedef struct {
int x;
int y;
} invitation;
typedef struct {
int default_leader;
int leader;
int committee;
int min_active;
invitation invite;
} payload;
/* Return the smaller of the two integers. */
int min(int a, int b) {
    if (b < a)
        return b;
    return a;
}
/**
 * min_invitation - keeps the lexicographically smaller of two invitations
 * in `a`, comparing first by x and then by y.
 *
 * @a: the invitation that receives the result
 * @b: the candidate invitation
 */
void min_invitation(invitation* a, invitation* b) {
    int b_is_smaller = (b->x < a->x) || (b->x == a->x && b->y < a->y);
    if (b_is_smaller) {
        a->x = b->x;
        a->y = b->y;
    }
}
/**
 * initialize_graph - attaches a freshly allocated payload to every vertex,
 * seeding leader/default_leader from kvals and marking the node as not yet
 * belonging to any committee (sentinel g->N+1).
 *
 * @g: a pointer to the graph object
 * @kvals: the initial leader values, one per vertex
 */
void initialize_graph(graph* g, int* kvals) {
    const invitation no_invite = { g->N, g->N };
    for (int v = 0; v < g->N; v++) {
        payload* state = malloc(sizeof(payload));
        state->default_leader = kvals[v];
        state->leader = kvals[v];
        state->committee = g->N + 1;
        state->min_active = g->N + 1;
        state->invite = no_invite;
        node* vertex = elem_at(&g->vertices, v);
        vertex->data = state;
    }
}
/**
 * do_polling - Performs the polling phase of the k-committee algorithm
 *
 * @g: the graph itself
 * @K: the maximum number of elements in a committee
 * @active_ql: a pointer to a queuelist object that's going to store `min_active`s
 *
 * For K-1 synchronous rounds each node broadcasts its `min_active` value to
 * its neighbors and then lowers its own `leader` to the smallest value heard,
 * so the minimum active leader value spreads up to K-1 hops.
 */
void do_polling(graph* g, int K, queuelist* active_ql) {
DEBUG("starting polling\n");
DEBUG("starting K-1 rounds\n");
for (int k = 0; k < K-1; k++) {
DEBUG("round k = %d\n", k);
/**
* Broadcast each node's `min_active` to its neighbors.
*/
DEBUG("broadcasting `min_active`s\n");
#pragma omp parallel for schedule(SCHEDULING_METHOD)
for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
/* a node is "active" while unassigned (committee still the g->N+1
   sentinel); inactive nodes advertise the sentinel instead */
if (data->committee == g->N+1)
data->min_active = data->default_leader;
else
data->min_active = g->N+1;
for (int j = 0; j < cur->degree; j++) {
node* neighbor = *((node**) elem_at(&cur->neighbors, j));
/* enqueue onto the receiver's queue, keyed by its label */
enqueue(active_ql, neighbor->label, &data->min_active);
}
}
/**
* Receive all the broadcasted `min_active`s.
*/
DEBUG("receiving broadcasted transmissions\n");
#pragma omp parallel for schedule(SCHEDULING_METHOD)
for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
while(!is_ql_queue_empty(active_ql, i)) {
int* active = dequeue(active_ql, i);
data->min_active = min(data->min_active, *active);
}
/* leader only ever decreases toward the smallest value heard */
data->leader = min(data->leader, data->min_active);
}
}
}
/**
 * do_selection - Performs the selection phase of the k-committee algorithm
 *
 * @g: the graph itself
 * @K: the maximum number of elements in a committee
 * @invite_ql: a pointer to a queuelist object that's going to store invitations
 *
 * Nodes that still believe they are their own leader issue an invitation
 * (their index, their min_active). Invitations flood for K-1 rounds, each
 * node keeping only the lexicographically smallest one; a node joins a
 * committee when the winning invitation addresses it.
 */
void do_selection(graph* g, int K, queuelist* invite_ql) {
DEBUG("starting selection\n");
DEBUG("creating initial invitations\n");
#pragma omp parallel for schedule(SCHEDULING_METHOD)
for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
/* only self-leaders invite; everyone else starts with the (N+1, N+1)
   sentinel, which loses every lexicographic comparison */
if (data->leader == data->default_leader) {
data->invite.x = i;
data->invite.y = data->min_active;
}
else {
data->invite.x = g->N+1;
data->invite.y = g->N+1;
}
}
DEBUG("starting K-1 rounds\n");
for (int k = 0; k < K-1; k++) {
DEBUG("round k = %d\n", k);
/**
* Broadcast invitations to neighbors.
*/
DEBUG("broadcasting invitations\n");
#pragma omp parallel for schedule(SCHEDULING_METHOD)
for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
for (int j = 0; j < cur->degree; j++) {
node* neighbor = *((node**) elem_at(&cur->neighbors, j));
enqueue(invite_ql, neighbor->label, &data->invite);
}
}
/**
* Of all the invitations we've received, choose the lexicographically
* smallest one.
*/
DEBUG("receiving broadcasted invitations\n");
#pragma omp parallel for schedule(SCHEDULING_METHOD)
for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
while (!is_ql_queue_empty(invite_ql, i)) {
invitation* invite = dequeue(invite_ql, i);
min_invitation(&data->invite, invite);
}
// make sure the invite is for us
if (data->invite.y == data->default_leader && data->invite.x == data->leader)
data->committee = data->leader;
}
}
}
/**
 * legalize_committees - A final sanity check: any node still carrying a
 * committee id outside [0, N) (i.e. the "unassigned" sentinel) is placed in
 * a singleton committee named after its own index.
 *
 * @g: the graph itself
 */
void legalize_committees(graph* g) {
    DEBUG("making sure there aren't any illegal committees\n");
#pragma omp parallel for schedule(SCHEDULING_METHOD)
    for (int v = 0; v < g->N; v++) {
        node* vertex = elem_at(&g->vertices, v);
        payload* state = vertex->data;
        int illegal = (state->committee >= g->N);
        if (illegal)
            state->committee = v;
    }
}
/**
 * verify_and_print_solution - Verifies that the generated committee is legal
 * and prints the solution.
 *
 * Legality means: every node's committee id lies in [0, N), and no committee
 * has more than K members.
 *
 * @g: the graph itself
 * @K: the maximum number of elements in a committee
 *
 * Returns 0 if the produced partitioning is correct. Returns 1 otherwise.
 */
int verify_and_print_solution(graph* g, int K) {
    int correct = 1;
    /* calloc zero-initializes the counters, replacing the previous
       malloc + explicit clearing loop */
    int* committee_count = calloc(g->N, sizeof(int));
    for (int i = 0; i < g->N; i++) {
        node* cur = elem_at(&g->vertices, i);
        payload* data = cur->data;
        /* an id outside [0, N) means the node was never legally assigned */
        if (data->committee >= g->N) {
            correct = 0;
            WARN("%d apparently belongs to a non-existant committee %d\n", i, data->committee);
            goto end;
        }
        committee_count[data->committee]++;
        INFO("%d->committee = %d\n", i, data->committee);
    }
    /* every committee may hold at most K members */
    for (int i = 0; i < g->N; i++) {
        if (committee_count[i] > K) {
            WARN("committee %d has too many members (%d > %d)\n", i, committee_count[i], K);
            correct = 0;
        }
    }
end:
    free(committee_count);
    if (correct)
        INFO("Produced solution is correct\n");
    else
        INFO("Produced solution is incorrect\n");
    return !correct;
}
/**
 * Based on Roger Wattenhofer's Principles of Distributed Computing's
 * section 23.4.2 on k-Committee election.
 *
 * Two input modes: a file given via argv (N, K, edge list, then the initial
 * leader values), or a randomly generated connected graph with defaults
 * N=16, M=64, K=4 (overridable by three positional arguments).
 * In file mode the average runtime and energy over `iterations` runs is
 * printed; the exit status is the verification result of the last run.
 */
int main(int argc, char* argv[]) {
int N;
int M;
int K;
int* kvals;
graph* g;
int iterate;
int iterations = 1;
if ((iterate = input_through_argv(argc, argv))) {
/* NOTE(review): fopen and the fscanf results are unchecked — a missing
   or malformed input file leads to undefined values; verify upstream
   callers always pass a valid file. */
FILE* in = fopen(argv[2], "r");
fscanf(in, "%d\n", &N);
kvals = malloc(N * sizeof(int));
fscanf(in, "%d\n", &K);
g = new_graph(N, 0);
g->M = M = read_graph(g, in);
fscanf(in, "\n");
for (int i = 0; i < N; i++)
fscanf(in, "%d", &kvals[i]);
fclose(in);
sscanf(argv[3], "%d", &iterations);
}
else {
/* default problem size; argv[1..3] may override all three */
N = 16;
M = 64;
K = 4;
if (argc > 1) {
sscanf(argv[1], "%d", &N);
sscanf(argv[2], "%d", &M);
sscanf(argv[3], "%d", &K);
}
g = generate_new_connected_graph(N, M);
kvals = malloc(N * sizeof(int));
for (int i = 0; i < N; i++)
kvals[i] = i;
}
long long duration = 0;
double total_energy = 0;
int verification;
for (int i = 0; i < iterations; i++) {
queuelist* active_ql = new_queuelist(N, sizeof(int));
queuelist* invite_ql = new_queuelist(N, sizeof(invitation));
begin_timer();
init_energy_measure();
initialize_graph(g, kvals);
/* K phases of polling + selection, as in the textbook algorithm */
for (int k = 0; k < K; k++) {
DEBUG("phase k = %d\n", k);
do_polling(g, K, active_ql);
do_selection(g, K, invite_ql);
}
legalize_committees(g);
total_energy += total_energy_used();
duration += time_elapsed();
verification = verify_and_print_solution(g, K);
free_queuelist(invite_ql);
free_queuelist(active_ql);
}
free(kvals);
if (iterate)
printf("%.2lf %.2lf\n", ((double) duration) / iterations, total_energy / iterations);
return verification;
}
|
callback.h | #ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#define _DEFAULT_SOURCE
#include <stdio.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <omp.h>
#include <omp-tools.h>
#include "ompt-signal.h"
// Used to detect architecture
#include "../../src/kmp_platform.h"
// Human-readable names for ompt_thread_t values; index 0 is unused because
// the enum's first meaningful value is 1.
static const char* ompt_thread_t_values[] = {
NULL,
"ompt_thread_initial",
"ompt_thread_worker",
"ompt_thread_other"
};
// Human-readable names for ompt_task_status_t; index 0 unused (enum starts at 1).
static const char* ompt_task_status_t_values[] = {
NULL,
"ompt_task_complete", // 1
"ompt_task_yield", // 2
"ompt_task_cancel", // 3
"ompt_task_detach", // 4
"ompt_task_early_fulfill", // 5
"ompt_task_late_fulfill", // 6
"ompt_task_switch" // 7
};
// Names for the ompt_cancel_flag_t bit positions, indexed by bit number
// (see on_ompt_callback_cancel below).
static const char* ompt_cancel_flag_t_values[] = {
"ompt_cancel_parallel",
"ompt_cancel_sections",
"ompt_cancel_loop",
"ompt_cancel_taskgroup",
"ompt_cancel_activated",
"ompt_cancel_detected",
"ompt_cancel_discarded_task"
};
// Render the ompt_task_flag_t bit set `type` as a human-readable string in
// `buffer` (caller-supplied, must be large enough; print_ids passes 2048).
// The first four (mutually related) kind flags print without a separator;
// the modifier flags are appended with a leading '|'.
static void format_task_type(int type, char *buffer) {
  char *progress = buffer;
  // BUGFIX: make the buffer a valid (empty) string even when no known flag
  // is set; previously it was left uninitialized in that case and printed
  // as garbage by print_ids().
  *progress = '\0';
  if (type & ompt_task_initial)
    progress += sprintf(progress, "ompt_task_initial");
  if (type & ompt_task_implicit)
    progress += sprintf(progress, "ompt_task_implicit");
  if (type & ompt_task_explicit)
    progress += sprintf(progress, "ompt_task_explicit");
  if (type & ompt_task_target)
    progress += sprintf(progress, "ompt_task_target");
  if (type & ompt_task_undeferred)
    progress += sprintf(progress, "|ompt_task_undeferred");
  if (type & ompt_task_untied)
    progress += sprintf(progress, "|ompt_task_untied");
  if (type & ompt_task_final)
    progress += sprintf(progress, "|ompt_task_final");
  if (type & ompt_task_mergeable)
    progress += sprintf(progress, "|ompt_task_mergeable");
  if (type & ompt_task_merged)
    progress += sprintf(progress, "|ompt_task_merged");
}
// Pointers to OMPT runtime entry points. NOTE(review): these are presumably
// filled in via ompt_function_lookup_t during tool initialization, which is
// outside this excerpt — they are only read here.
static ompt_set_callback_t ompt_set_callback;
static ompt_get_callback_t ompt_get_callback;
static ompt_get_state_t ompt_get_state;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;
// Query ompt_get_task_info() at the given ancestor `level` and print the
// task's parallel id, task id, frame pointers, formatted task-type flags and
// thread number. Prints nothing when no frame information is available.
static void print_ids(int level)
{
int task_type, thread_num;
ompt_frame_t *frame;
ompt_data_t *task_parallel_data;
ompt_data_t *task_data;
// exists_task != 0 when a task is registered at this level; the id fields
// are only valid in that case (hence the conditional prints below).
int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame,
&task_parallel_data, &thread_num);
char buffer[2048];
format_task_type(task_type, buffer);
if (frame)
printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, "
"task_type=%s=%d, thread_num=%d\n",
ompt_get_thread_data()->value, level,
exists_task ? task_parallel_data->value : 0,
exists_task ? task_data->value : 0, frame->exit_frame.ptr,
frame->enter_frame.ptr, buffer, task_type, thread_num);
}
// Helper macros used by the OMPT lit tests to print frame and return
// addresses that the tests' CHECK lines then match.
#define get_frame_address(level) __builtin_frame_address(level)
#define print_frame(level) \
printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \
ompt_get_thread_data()->value, level, get_frame_address(level))
// clang (version 5.0 and above) adds an intermediate function call with debug flag (-g)
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif
#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif
// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
{} \
__asm__("nop"); \
ompt_label_##id:
// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)
// This macro prints the exact address that a previously called runtime function
// returns to.
#define print_current_address(id) \
define_ompt_label(id) \
print_possible_return_addresses(get_ompt_label_address(id))
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the comiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts an LD instruction which accounts for another 4 bytes. In contrast to
// X86 this instruction is always there, even for void runtime functions.
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p\n", ompt_get_thread_data()->value, \
((char *)addr) - 8)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
((char *)addr) - 4, ((char *)addr) - 8)
#else
#error Unsupported target architecture, cannot determine address offset!
#endif
// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
define_ompt_label(id) \
print_fuzzy_address_blocks(get_ompt_label_address(id))
// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
// Prints the address divided into FUZZY_ADDRESS_DISCARD_BYTES-sized blocks:
// the previous, current and the two following block numbers, plus the raw
// address for debugging.
#define print_fuzzy_address_blocks(addr) \
printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 \
" or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \
ompt_get_thread_data()->value, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr)
// OMPT mutex-acquire callback: logs that the current thread starts waiting
// for the given lock/critical/atomic/ordered construct. The printed strings
// are matched by the lit tests, so they must not change. Unknown kinds are
// silently ignored.
static void
on_ompt_callback_mutex_acquire(
ompt_mutex_t kind,
unsigned int hint,
unsigned int impl,
ompt_wait_id_t wait_id,
const void *codeptr_ra)
{
switch(kind)
{
case ompt_mutex_lock:
printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
break;
case ompt_mutex_nest_lock:
printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
break;
case ompt_mutex_critical:
printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
break;
case ompt_mutex_atomic:
printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
break;
case ompt_mutex_ordered:
printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
break;
default:
break;
}
}
// OMPT mutex-acquired callback: logs that the wait ended and the construct
// was entered. For nested locks this fires only on the first acquisition
// (re-acquisitions are reported via on_ompt_callback_nest_lock).
static void
on_ompt_callback_mutex_acquired(
ompt_mutex_t kind,
ompt_wait_id_t wait_id,
const void *codeptr_ra)
{
switch(kind)
{
case ompt_mutex_lock:
printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
case ompt_mutex_nest_lock:
printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
case ompt_mutex_critical:
printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
case ompt_mutex_atomic:
printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
case ompt_mutex_ordered:
printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
default:
break;
}
}
// OMPT mutex-released callback: logs that the construct was exited. For
// nested locks this fires only on the final (outermost) release.
static void
on_ompt_callback_mutex_released(
ompt_mutex_t kind,
ompt_wait_id_t wait_id,
const void *codeptr_ra)
{
switch(kind)
{
case ompt_mutex_lock:
printf("%" PRIu64 ": ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
case ompt_mutex_nest_lock:
printf("%" PRIu64 ": ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
case ompt_mutex_critical:
printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
case ompt_mutex_atomic:
printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
case ompt_mutex_ordered:
printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
default:
break;
}
}
// OMPT nest-lock callback: logs inner re-acquisitions (scope begin) and
// inner releases (scope end) of a nested lock already held by this thread.
static void
on_ompt_callback_nest_lock(
ompt_scope_endpoint_t endpoint,
ompt_wait_id_t wait_id,
const void *codeptr_ra)
{
switch(endpoint)
{
case ompt_scope_begin:
printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
case ompt_scope_end:
printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
break;
}
}
// OMPT sync-region callback: logs entry to / exit from barriers, taskwait
// and taskgroup regions. On scope end parallel_data may be NULL (end of the
// parallel region), hence the conditional value below; reduction regions
// are intentionally not printed.
static void
on_ompt_callback_sync_region(
ompt_sync_region_t kind,
ompt_scope_endpoint_t endpoint,
ompt_data_t *parallel_data,
ompt_data_t *task_data,
const void *codeptr_ra)
{
switch(endpoint)
{
case ompt_scope_begin:
switch(kind)
{
case ompt_sync_region_barrier:
case ompt_sync_region_barrier_implicit:
case ompt_sync_region_barrier_explicit:
case ompt_sync_region_barrier_implementation:
printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
print_ids(0);
break;
case ompt_sync_region_taskwait:
printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
break;
case ompt_sync_region_taskgroup:
printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
break;
case ompt_sync_region_reduction:
break;
}
break;
case ompt_scope_end:
switch(kind)
{
case ompt_sync_region_barrier:
case ompt_sync_region_barrier_implicit:
case ompt_sync_region_barrier_explicit:
case ompt_sync_region_barrier_implementation:
printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
break;
case ompt_sync_region_taskwait:
printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
break;
case ompt_sync_region_taskgroup:
printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
break;
case ompt_sync_region_reduction:
break;
}
break;
}
}
// OMPT sync-region-wait callback: same structure as the sync_region callback
// above, but reports the begin/end of the *waiting* interval inside the
// region ("wait_" event names). parallel_data may be NULL on scope end.
static void
on_ompt_callback_sync_region_wait(
ompt_sync_region_t kind,
ompt_scope_endpoint_t endpoint,
ompt_data_t *parallel_data,
ompt_data_t *task_data,
const void *codeptr_ra)
{
switch(endpoint)
{
case ompt_scope_begin:
switch(kind)
{
case ompt_sync_region_barrier:
case ompt_sync_region_barrier_implicit:
case ompt_sync_region_barrier_explicit:
case ompt_sync_region_barrier_implementation:
printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
break;
case ompt_sync_region_taskwait:
printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
break;
case ompt_sync_region_taskgroup:
printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
break;
case ompt_sync_region_reduction:
break;
}
break;
case ompt_scope_end:
switch(kind)
{
case ompt_sync_region_barrier:
case ompt_sync_region_barrier_implicit:
case ompt_sync_region_barrier_explicit:
case ompt_sync_region_barrier_implementation:
printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
break;
case ompt_sync_region_taskwait:
printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
break;
case ompt_sync_region_taskgroup:
printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
break;
case ompt_sync_region_reduction:
break;
}
break;
}
}
// OMPT flush callback: logs an executed "#pragma omp flush".
static void
on_ompt_callback_flush(
ompt_data_t *thread_data,
const void *codeptr_ra)
{
printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n", thread_data->value, codeptr_ra);
}
// OMPT cancel callback: logs which construct kind was cancelled (first flag
// group) and how the cancellation was observed (second flag group).
static void
on_ompt_callback_cancel(
  ompt_data_t *task_data,
  int flags,
  const void *codeptr_ra)
{
  // BUGFIX: initialize both strings. Previously, if `flags` carried no bit
  // from one of the two groups, the corresponding pointer was passed to
  // printf uninitialized (undefined behavior).
  const char* first_flag_value = "undefined";
  const char* second_flag_value = "undefined";
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_loop)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];
  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];
  printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra);
}
/*
 * Tool callback for implicit tasks.  On scope begin the tool claims the
 * task_data slot (it must arrive zeroed) and assigns a fresh unique id; on
 * scope end only the trace line is printed (parallel_data may be NULL then).
 */
static void
on_ompt_callback_implicit_task(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  unsigned int team_size,
  unsigned int thread_num,
  int flags)
{
  if (endpoint == ompt_scope_begin)
  {
    if (task_data->ptr)
      printf("%s\n", "0: task_data initially not null");
    task_data->value = ompt_get_unique_id();
    printf("%" PRIu64 ": ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num);
  }
  else if (endpoint == ompt_scope_end)
  {
    printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data) ? parallel_data->value : 0, task_data->value, team_size, thread_num);
  }
}
/*
 * Tool callback for lock initialization (omp_init_lock / omp_init_nest_lock
 * with hint).  Other mutex kinds are ignored, matching the original default.
 */
static void
on_ompt_callback_lock_init(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (kind == ompt_mutex_lock)
    printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
  else if (kind == ompt_mutex_nest_lock)
    printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
}
/*
 * Tool callback for lock destruction (omp_destroy_lock /
 * omp_destroy_nest_lock).  Other mutex kinds are ignored.
 */
static void
on_ompt_callback_lock_destroy(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (kind == ompt_mutex_lock)
    printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_nest_lock)
    printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
}
/*
 * Tool callback for worksharing constructs (loop, sections, single,
 * workshare, distribute, taskloop).  Dispatches on the scope endpoint and
 * construct type and prints exactly one trace line per event.  `count` is
 * the count reported by the runtime for the construct.
 * NOTE(review): some begin lines label the task as "parent_task_id" and
 * others as "task_id" — kept as-is since test reference output depends on
 * these exact strings.
 */
static void
on_ompt_callback_work(
  ompt_work_t wstype,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  uint64_t count,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          /* intentionally untraced */
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_end:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          /* intentionally untraced */
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
      }
      break;
  }
}
/*
 * Tool callback for the master construct: one trace line at scope begin and
 * one at scope end.
 */
static void
on_ompt_callback_master(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  if (endpoint == ompt_scope_begin)
    printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
  else if (endpoint == ompt_scope_end)
    printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
}
/*
 * Tool callback for parallel-region begin.  The tool owns parallel_data: it
 * must arrive zero-initialized, and a fresh unique id is stored into it so
 * later callbacks for the same region can be correlated.
 */
static void on_ompt_callback_parallel_begin(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame, ompt_data_t *parallel_data,
    uint32_t requested_team_size, int flag, const void *codeptr_ra) {
  /* Sanity check: a non-null ptr here means the runtime reused the slot. */
  if (parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_parallel_begin: parent_task_id=%" PRIu64
         ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
         "parallel_id=%" PRIu64 ", requested_team_size=%" PRIu32
         ", codeptr_ra=%p, invoker=%d\n",
         ompt_get_thread_data()->value, encountering_task_data->value,
         encountering_task_frame->exit_frame.ptr,
         encountering_task_frame->enter_frame.ptr, parallel_data->value,
         requested_team_size, codeptr_ra, flag);
}
/*
 * Tool callback for parallel-region end: prints the region id assigned at
 * begin, the encountering task id, and the invoker flag.
 */
static void on_ompt_callback_parallel_end(ompt_data_t *parallel_data,
                                          ompt_data_t *encountering_task_data,
                                          int flag, const void *codeptr_ra) {
  printf("%" PRIu64 ": ompt_event_parallel_end: parallel_id=%" PRIu64
         ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n",
         ompt_get_thread_data()->value, parallel_data->value,
         encountering_task_data->value, flag, codeptr_ra);
}
/*
 * Tool callback for task creation.  Assigns a fresh unique id to the new
 * task, decodes the task-type flags into a readable string, and — for the
 * initial task only — also claims and ids the implicit parallel region's
 * parallel_data (there is no parallel_begin callback for it).
 */
static void
on_ompt_callback_task_create(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame,
    ompt_data_t* new_task_data,
    int type,
    int has_dependences,
    const void *codeptr_ra)
{
  if(new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  char buffer[2048];
  format_task_type(type, buffer);
  //there is no parallel_begin callback for implicit parallel region
  //thus it is initialized in initial task
  if(type & ompt_task_initial)
  {
    ompt_data_t *parallel_data;
    /* FIX: the address-of operator was garbled to "¶llel_data" by a bad
       character-encoding pass; restored to &parallel_data. */
    ompt_get_parallel_info(0, &parallel_data, NULL);
    if(parallel_data->ptr)
      printf("%s\n", "0: parallel_data initially not null");
    parallel_data->value = ompt_get_unique_id();
  }
  printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame.ptr : NULL, encountering_task_frame ? encountering_task_frame->enter_frame.ptr : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no");
}
/*
 * Tool callback for task-scheduling points: always prints the schedule
 * event; additionally prints a task_end event when the prior task completed.
 */
static void
on_ompt_callback_task_schedule(
  ompt_data_t *first_task_data,
  ompt_task_status_t prior_task_status,
  ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value, ompt_task_status_t_values[prior_task_status], prior_task_status);
  if (prior_task_status != ompt_task_complete)
    return;
  printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value);
}
/*
 * Tool callback reporting the dependence list of a newly created task:
 * prints the task id, the address of the dependence array, and its length.
 */
static void
on_ompt_callback_dependences(
  ompt_data_t *task_data,
  const ompt_dependence_t *deps,
  int ndeps)
{
  printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64 ", deps=%p, ndeps=%d\n", ompt_get_thread_data()->value, task_data->value, (void *)deps, ndeps);
}
/*
 * Tool callback for a single blocking dependence between two tasks: prints
 * the pair of task ids.
 */
static void
on_ompt_callback_task_dependence(
  ompt_data_t *first_task_data,
  ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value);
}
/*
 * Tool callback for thread start.  Claims the tool-owned thread_data slot
 * (it must arrive zeroed) and stores a fresh unique id used by all later
 * trace lines of this thread.
 */
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_t_values[thread_type], thread_type, thread_data->value);
}
/*
 * Tool callback for thread exit: prints the id assigned in thread_begin.
 */
static void
on_ompt_callback_thread_end(
  ompt_data_t *thread_data)
{
  printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_data->value);
}
/*
 * Tool callback for omp_control_tool(): prints the command/modifier pair,
 * the user argument, and the current task's frame pointers obtained via
 * ompt_get_task_info.  Returns 0 to signal success to the runtime.
 */
static int
on_ompt_callback_control_tool(
  uint64_t command,
  uint64_t modifier,
  void *arg,
  const void *codeptr_ra)
{
  ompt_frame_t* omptTaskFrame;
  /* Depth 0 = the currently executing task; only the frame is requested. */
  ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
  printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame.ptr, omptTaskFrame->enter_frame.ptr);
  return 0; //success
}
/* Register callback `on_<name>` with the runtime under an explicit
   signature type; warns (but continues) if the runtime refuses it. */
#define register_callback_t(name, type)                       \
do{                                                           \
  type f_##name = &on_##name;                                 \
  if (ompt_set_callback(name, (ompt_callback_t)f_##name) ==   \
      ompt_set_never)                                         \
    printf("0: Could not register callback '" #name "'\n");   \
}while(0)

/* Common case: the callback's signature type is `<name>_t`. */
#define register_callback(name) register_callback_t(name, name##_t)
/*
 * OMPT tool initializer, invoked by the runtime after ompt_start_tool.
 * Resolves all inquiry functions through `lookup`, registers every event
 * callback this tool implements, and returns 1 so the tool stays active.
 */
int ompt_initialize(
  ompt_function_lookup_t lookup,
  int initial_device_num,
  ompt_data_t *tool_data)
{
  /* Resolve runtime entry points (inquiry functions). */
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback");
  ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state");
  ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
  ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
  ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
  ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
  ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
  ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
  ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
  ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
  ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");
  /* Register event callbacks.  The *_t variants reuse a handler under a
     different signature type (e.g. acquired/released share ompt_callback_mutex_t). */
  register_callback(ompt_callback_mutex_acquire);
  register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
  register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
  register_callback(ompt_callback_nest_lock);
  register_callback(ompt_callback_sync_region);
  register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_callback(ompt_callback_control_tool);
  register_callback(ompt_callback_flush);
  register_callback(ompt_callback_cancel);
  register_callback(ompt_callback_implicit_task);
  register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
  register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
  register_callback(ompt_callback_work);
  register_callback(ompt_callback_master);
  register_callback(ompt_callback_parallel_begin);
  register_callback(ompt_callback_parallel_end);
  register_callback(ompt_callback_task_create);
  register_callback(ompt_callback_task_schedule);
  register_callback(ompt_callback_dependences);
  register_callback(ompt_callback_task_dependence);
  register_callback(ompt_callback_thread_begin);
  register_callback(ompt_callback_thread_end);
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}
/* OMPT tool finalizer: called once at runtime shutdown. */
void ompt_finalize(ompt_data_t *tool_data)
{
  printf("0: ompt_event_runtime_shutdown\n");
}
#ifdef __cplusplus
extern "C" {
#endif
/*
 * OMPT tool entry point, found by the runtime at startup.  Returns a static
 * descriptor holding the initialize/finalize callbacks and a zeroed
 * tool_data; returning non-NULL activates the tool.
 */
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
  return &ompt_start_tool_result;
}
#ifdef __cplusplus
}
#endif
|
broadcast_reduce_customized-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file broadcast_reduce_customized-inl.h
* \brief CPU-specific Function definition of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
#define MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
#include "../../tensor/broadcast_reduce-inl.h"
namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;
using mxnet_op::dot;
using mxnet_op::ravel;
using mxnet_op::unravel;
using mxnet_op::unravel_dot;
// Reduce one output element of `small`.  The output index `idx` is mapped to
// its base position in `big`; the M reduced elements are visited through the
// (rshape, rstride) pattern and folded with the caller-supplied reducer
// instance.  AType is the accumulation type, OType the output element type.
template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign_wr(const index_t idx,
                                          const size_t M,
                                          const bool addto,
                                          const DType* __restrict big,
                                          OType* small,
                                          const Shape<ndim>& bshape,
                                          const Shape<ndim>& sshape,
                                          const Shape<ndim>& rshape,
                                          const Shape<ndim>& rstride,
                                          Reducer* reducer) {
  // Base offset of this output element inside the big tensor.
  const index_t base = ravel(unravel(idx, sshape), bshape);
  AType acc, resid;  // resid carries compensation for e.g. Kahan-style reducers
  reducer->SetInitValue(acc, resid);
  for (size_t r = 0; r < M; ++r) {
    const Shape<ndim> rcoord = unravel(r, rshape);
    reducer->Reduce(acc, AType(OP::Map(big[base + dot(rcoord, rstride)])), resid);
  }
  reducer->Finalize(acc, resid);
  assign(&small[idx], addto, OType(acc));
}
// Apply seq_reduce_assign_wr to all N output elements in parallel with
// OpenMP.  N = number of output elements, M = reduction length per output;
// the reducer instance is shared across threads (assumed stateless per call —
// each iteration builds its own accumulator).
template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
void seq_reduce_compute_wr(const size_t N,
                           const size_t M,
                           const bool addto,
                           const DType* big,
                           OType* small,
                           const Shape<ndim> bshape,
                           const Shape<ndim> sshape,
                           const Shape<ndim> rshape,
                           const Shape<ndim> rstride,
                           Reducer* reducer) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    seq_reduce_assign_wr<Reducer, ndim, AType, DType, OType, OP>(
        idx, M, addto, big, small, bshape, sshape, rshape, rstride, reducer);
  }
}
// CPU reduction of `big` into `small` with a caller-supplied reducer object.
// When safe_acc is set, the accumulation/output types are widened via the
// MXNET_ACC_TYPE_SWITCH / MSHADOW_TYPE_SWITCH_WITH_BOOL dispatch macros to
// avoid overflow in low-precision inputs.  `workspace` is unused here (kept
// for interface parity with the generic Reduce).  No-op for kNullOp.
template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false>
void ReduceWithReducer(Stream<cpu>* s,
                       const TBlob& small,
                       const OpReqType req,
                       const Tensor<cpu, 1, char>& workspace,
                       const TBlob& big,
                       Reducer* reducer) {
  if (req == kNullOp)
    return;
  // rshape/rstride describe the axes being reduced (difference between
  // big's and small's shapes).
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  size_t N = small.shape_.Size(), M = rshape.Size();
  if (!safe_acc) {
    // Fast path: accumulate in the input dtype.
    seq_reduce_compute_wr<Reducer, ndim, DType, DType, DType, OP>(
        N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
        big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride,
        reducer);
  } else {
    // Safe path: pick a wider accumulation type for the input dtype.
    MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
      typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
      MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, {
        typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
        seq_reduce_compute_wr<Reducer, ndim, AccType, DataType, OutType, OP>(
            N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(),
            big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride,
            reducer);
      });
    });
  }
}
// Reduce one output element of small = Reduce(OP1(big, OP2(lhs, rhs))).
// Each of big/lhs/rhs has its own broadcast shape and stride pattern; the
// three base offsets are computed once from the output coordinate, then the
// M reduced positions are walked independently per operand.
template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign_wr(const index_t idx,
                                          const size_t M,
                                          const bool addto,
                                          const DType* __restrict big,
                                          const DType* __restrict lhs,
                                          const DType* __restrict rhs,
                                          DType* small,
                                          const Shape<ndim>& big_shape,
                                          const Shape<ndim>& lhs_shape0,
                                          const Shape<ndim>& rhs_shape0,
                                          const Shape<ndim>& small_shape,
                                          const Shape<ndim>& rshape,
                                          const Shape<ndim>& lhs_shape,
                                          const Shape<ndim>& rhs_shape,
                                          const Shape<ndim>& rstride,
                                          const Shape<ndim>& lhs_stride,
                                          const Shape<ndim>& rhs_stride,
                                          Reducer* reducer) {
  // Coordinate of this output element and base offsets in each operand.
  Shape<ndim> coord = unravel(idx, small_shape);
  const index_t idx_big0 = ravel(coord, big_shape);
  const index_t idx_lhs0 = ravel(coord, lhs_shape0);
  const index_t idx_rhs0 = ravel(coord, rhs_shape0);
  DType val, residual;  // residual: reducer-specific compensation term
  reducer->SetInitValue(val, residual);
  for (size_t k = 0; k < M; ++k) {
    // Offset of reduced element k within each operand's broadcast pattern.
    Shape<ndim> coord_big = unravel(k, rshape);
    index_t idx_big = idx_big0 + dot(coord_big, rstride);
    Shape<ndim> coord_lhs = unravel(k, lhs_shape);
    index_t idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
    Shape<ndim> coord_rhs = unravel(k, rhs_shape);
    index_t idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
    reducer->Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
  }
  reducer->Finalize(val, residual);
  assign(&small[idx], addto, val);
}
// OpenMP driver for the three-operand seq_reduce_assign_wr above: one
// parallel iteration per output element of `small`.
template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute_wr(const size_t N,
                           const size_t M,
                           const bool addto,
                           const DType* big,
                           const DType* lhs,
                           const DType* rhs,
                           DType* small,
                           const Shape<ndim> big_shape,
                           const Shape<ndim> small_shape,
                           const Shape<ndim> rshape,
                           const Shape<ndim> rstride,
                           const Shape<ndim> lhs_shape,
                           const Shape<ndim> lhs_stride,
                           const Shape<ndim> rhs_shape,
                           const Shape<ndim> rhs_stride,
                           const Shape<ndim>& lhs_shape0,
                           const Shape<ndim>& rhs_shape0,
                           Reducer* reducer) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    seq_reduce_assign_wr<Reducer, ndim, DType, OP1, OP2>(idx,
                                                         M,
                                                         addto,
                                                         big,
                                                         lhs,
                                                         rhs,
                                                         small,
                                                         big_shape,
                                                         lhs_shape0,
                                                         rhs_shape0,
                                                         small_shape,
                                                         rshape,
                                                         lhs_shape,
                                                         rhs_shape,
                                                         rstride,
                                                         lhs_stride,
                                                         rhs_stride,
                                                         reducer);
  }
}
// CPU reduction small = Reduce(OP1(big, OP2(lhs, rhs))) with a caller-owned
// reducer.  Broadcast patterns for big, lhs and rhs are derived separately
// against small's shape.  `workspace` is unused (interface parity with the
// generic Reduce).  No-op for kNullOp; no safe-accumulation variant here.
template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void ReduceWithReducer(Stream<cpu>* s,
                       const TBlob& small,
                       const OpReqType req,
                       const Tensor<cpu, 1, char>& workspace,
                       const TBlob& big,
                       const TBlob& lhs,
                       const TBlob& rhs,
                       Reducer* reducer) {
  if (req == kNullOp)
    return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  size_t N = small.shape_.Size();
  size_t M = rshape.Size();
  Shape<ndim> lhs_shape, lhs_stride;
  diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
  Shape<ndim> rhs_shape, rhs_stride;
  diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
  seq_reduce_compute_wr<Reducer, ndim, DType, OP1, OP2>(N,
                                                        M,
                                                        req == kAddTo,
                                                        big.dptr<DType>(),
                                                        lhs.dptr<DType>(),
                                                        rhs.dptr<DType>(),
                                                        small.dptr<DType>(),
                                                        big.shape_.get<ndim>(),
                                                        small.shape_.get<ndim>(),
                                                        rshape,
                                                        rstride,
                                                        lhs_shape,
                                                        lhs_stride,
                                                        rhs_shape,
                                                        rhs_stride,
                                                        lhs.shape_.get<ndim>(),
                                                        rhs.shape_.get<ndim>(),
                                                        reducer);
}
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
|
estimate_dt_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela, Ruben Zorrilla
//
//
#ifndef KRATOS_ESTIMATE_DT_UTILITIES_H
#define KRATOS_ESTIMATE_DT_UTILITIES_H
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/element.h"
#include "includes/model_part.h"
#include "includes/kratos_parameters.h"
#include "includes/serializer.h"
#include "utilities/openmp_utils.h"
#include "utilities/geometry_utilities.h"
namespace Kratos
{
///@addtogroup FluidDynamicsApplication
///@{
///@name Kratos Classes
///@{
/// Estimate the time step in a fluid problem to obtain a given Courant number.
template< unsigned int TDim >
class EstimateDtUtility
{
public:

    ///@name Life Cycle
    ///@{

    /// Constructor
    /**
     * @param ModelPart The model part containing the problem mesh
     * @param CFL The user-defined Courant-Friedrichs-Lewy number
     * @param DtMin user-defined minimum time increment allowed
     * @param DtMax user-defined maximum time increment allowed
     */
    EstimateDtUtility(ModelPart &ModelPart, const double CFL, const double DtMin, const double DtMax):
        mrModelPart(ModelPart)
    {
        mCFL = CFL;
        mDtMin = DtMin;
        mDtMax = DtMax;
    }

    /// Constructor with Kratos parameters
    /**
     * @param ModelPart The model part containing the problem mesh
     * @param rParameters Kratos parameters containing the CFL number and max time step
     */
    EstimateDtUtility(ModelPart& ModelPart, Parameters& rParameters):
        mrModelPart(ModelPart)
    {
        Parameters defaultParameters(R"({
            "automatic_time_step" : true,
            "CFL_number"          : 1.0,
            "minimum_delta_time"  : 1e-4,
            "maximum_delta_time"  : 0.1
        })");

        rParameters.ValidateAndAssignDefaults(defaultParameters);

        mCFL = rParameters["CFL_number"].GetDouble();
        mDtMin = rParameters["minimum_delta_time"].GetDouble();
        mDtMax = rParameters["maximum_delta_time"].GetDouble();
    }

    /// Destructor
    ~EstimateDtUtility()
    {}

    ///@}
    ///@name Operations
    ///@{

    /// Set the CFL value.
    /**
     * @param CFL the user-defined CFL number used in the automatic time step computation
     */
    void SetCFL(const double CFL)
    {
        mCFL = CFL;
    }

    /// Set the minimum time step allowed value.
    /**
     * @param DtMin the user-defined minimum time increment allowed
     */
    void SetDtMin(const double DtMin)
    {
        mDtMin = DtMin;
    }

    /// Set the maximum time step allowed value.
    /**
     * @param DtMax the user-defined maximum time increment allowed
     */
    void SetDtMax(const double DtMax)
    {
        mDtMax = DtMax;
    }

    /// Calculate the maximum time step that satisfies the Courant-Friedrichs-Lewy (CFL) condition.
    /**
     * @return A time step value that satisfies the CFL condition for the current mesh and velocity field
     */
    double EstimateDt()
    {
        KRATOS_TRY;

        unsigned int NumThreads = OpenMPUtils::GetNumThreads();
        OpenMPUtils::PartitionVector ElementPartition;
        OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfElements(),NumThreads,ElementPartition);

        double CurrentDt = mrModelPart.GetProcessInfo().GetValue(DELTA_TIME);

        // Per-thread maximum elemental CFL for the current time step
        std::vector<double> MaxCFL(NumThreads,0.0);

        #pragma omp parallel shared(MaxCFL)
        {
            int k = OpenMPUtils::ThisThread();
            ModelPart::ElementIterator ElemBegin = mrModelPart.ElementsBegin() + ElementPartition[k];
            ModelPart::ElementIterator ElemEnd = mrModelPart.ElementsBegin() + ElementPartition[k+1];

            GeometryDataContainer GeometryInfo;
            double MaxLocalCFL = 0.0;

            for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
            {
                double ElementCFL = CalculateElementCFL(*itElem,GeometryInfo,CurrentDt);
                if (ElementCFL > MaxLocalCFL)
                {
                    MaxLocalCFL = ElementCFL;
                }
            }

            MaxCFL[k] = MaxLocalCFL;
        }

        // Reduce to maximum the thread results
        // Note that MSVC14 does not support max reductions, which are part of OpenMP 3.1
        // BUGFIX: the comparison previously kept the *minimum* thread value
        // (CurrentCFL > MaxCFL[k]), underestimating the global CFL and hence
        // allowing an unsafely large time step.
        double CurrentCFL = MaxCFL[0];
        for (unsigned int k = 1; k < NumThreads; k++)
        {
            if (MaxCFL[k] > CurrentCFL) CurrentCFL = MaxCFL[k];
        }

        double NewDt = 0.0;

        // Avoid division by 0 when the maximum CFL number is close to 0 (e.g. problem initialization)
        if (CurrentCFL < 1e-10)
        {
            KRATOS_INFO("EstimateDtUtility") << "Setting minimum delta time " << mDtMin << " as current time step." << std::endl;
            NewDt = mDtMin;
        }
        else
        {
            // Compute new Dt so that the target CFL (mCFL) is attained
            NewDt = mCFL * CurrentDt / CurrentCFL;
            // Limit max and min Dt
            if (NewDt > mDtMax)
            {
                NewDt = mDtMax;
            }
            else if (NewDt < mDtMin)
            {
                NewDt = mDtMin;
            }
        }

        // Perform MPI sync if needed
        NewDt = mrModelPart.GetCommunicator().GetDataCommunicator().MinAll(NewDt);

        return NewDt;

        KRATOS_CATCH("")
    }

    /// Calculate each element's CFL for the current time step for the given ModelPart.
    /**
     * The elemental CFL is stored in the CFL_NUMBER elemental variable.
     * To view it in the post-process file, remember to print CFL_NUMBER as a Gauss Point result.
     */
    static void CalculateLocalCFL(ModelPart& rModelPart)
    {
        KRATOS_TRY;

        unsigned int NumThreads = OpenMPUtils::GetNumThreads();
        OpenMPUtils::PartitionVector ElementPartition;
        OpenMPUtils::DivideInPartitions(rModelPart.NumberOfElements(),NumThreads,ElementPartition);

        const double CurrentDt = rModelPart.GetProcessInfo().GetValue(DELTA_TIME);

        #pragma omp parallel
        {
            int k = OpenMPUtils::ThisThread();
            ModelPart::ElementIterator ElemBegin = rModelPart.ElementsBegin() + ElementPartition[k];
            ModelPart::ElementIterator ElemEnd = rModelPart.ElementsBegin() + ElementPartition[k+1];

            GeometryDataContainer GeometryInfo;

            for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
            {
                double ElementCFL = EstimateDtUtility<TDim>::CalculateElementCFL(*itElem,GeometryInfo,CurrentDt);
                itElem->SetValue(CFL_NUMBER,ElementCFL);
            }
        }

        KRATOS_CATCH("")
    }

    /// Calculate each element's CFL for the current time step.
    /**
     * The elemental CFL is stored in the CFL_NUMBER elemental variable.
     * To view it in the post-process file, remember to print CFL_NUMBER as a Gauss Point result.
     */
    void CalculateLocalCFL()
    {
        EstimateDtUtility<TDim>::CalculateLocalCFL(mrModelPart);
    }

    ///@} // Operators

private:

    ///@name Auxiliary Data types
    ///@{

    // Scratch container so geometry data is computed once per element
    // without reallocating per iteration.
    struct GeometryDataContainer {
        double Area;
        array_1d<double, TDim+1> N;
        BoundedMatrix<double, TDim+1, TDim> DN_DX;
    };

    ///@}
    ///@name Member Variables
    ///@{

    double    mCFL;         // User-defined CFL number
    double    mDtMax;       // User-defined maximum time increment allowed
    double    mDtMin;       // User-defined minimum time increment allowed
    ModelPart &mrModelPart; // The problem's model part

    ///@} // Member variables
    ///@name Private Operations
    ///@{

    /// Compute the CFL-like number (velocity projection * Dt) of a single element.
    static double CalculateElementCFL(Element &rElement, GeometryDataContainer& rGeometryInfo, double Dt)
    {
        double Proj = 0.0;

        // Get the element's geometric parameters
        const auto& r_geometry = rElement.GetGeometry();
        GeometryUtils::CalculateGeometryData(r_geometry, rGeometryInfo.DN_DX, rGeometryInfo.N, rGeometryInfo.Area);

        // Elemental Velocity (interpolated at the barycenter via shape functions)
        array_1d<double,3> ElementVel = rGeometryInfo.N[0]*r_geometry[0].FastGetSolutionStepValue(VELOCITY);
        for (unsigned int i = 1; i < TDim+1; ++i)
            ElementVel += rGeometryInfo.N[i]*r_geometry[i].FastGetSolutionStepValue(VELOCITY);

        // Calculate u/h as the maximum projection of the velocity along element heights
        // NOTE(review): Proj is accumulated across nodes without a per-node
        // reset, so the result is |running sum| rather than a per-node
        // maximum as the comment suggests. Kept unchanged to preserve
        // behavior — confirm against upstream before altering.
        for (unsigned int i = 0; i < TDim+1; ++i)
        {
            for (unsigned int d = 0; d < TDim; ++d)
                Proj += ElementVel[d]*rGeometryInfo.DN_DX(i,d);
            Proj = fabs(Proj);
        }

        return Proj*Dt;
    }

    ///@} // Private Operations
};
///@} // Kratos classes
///@}
} // namespace Kratos.
#endif /* KRATOS_ESTIMATE_DT_UTILITIES_H */
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declaractions.
*/
/* Per-channel compression schemes recognized in PSD image data. */
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;
/* Color modes stored in the PSD file header.  Values 5 and 6 are not
   handled by this coder. */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
Typedef declaractions.
*/
/* Type and byte size of one channel record within a layer. */
typedef struct _ChannelInfo
{
  short int
    type;

  size_t
    size;
} ChannelInfo;
/* Layer-mask data: the mask image, its placement, and its raw flags. */
typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;
/* Everything parsed for a single PSD layer: per-channel records (up to
   MaxPSDChannels), blend key, decoded image, mask, opacity, placement,
   flags, Pascal-style name, and any extra info block. */
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];

  Image
    *image;

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[256],
    visible;

  unsigned short
    channels;

  StringInfo
    *info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/*
  IsPSD() returns MagickTrue when the leading bytes carry the Photoshop
  "8BPS" signature.
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  MagickBooleanType
    is_psd;

  is_psd=MagickFalse;
  if ((length >= 4) &&
      (LocaleNCompare((const char *) magick,"8BPS",4) == 0))
    is_psd=MagickTrue;
  return(is_psd);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map a MagickCore composite operator onto the matching four-character
  PSD blend-mode key; anything unmapped falls back to "norm".
*/
static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op)
{
  switch (op)
  {
    case ColorBurnCompositeOp:   return("idiv");
    case ColorDodgeCompositeOp:  return("div ");
    case ColorizeCompositeOp:    return("colr");
    case DarkenCompositeOp:      return("dark");
    case DifferenceCompositeOp:  return("diff");
    case DissolveCompositeOp:    return("diss");
    case ExclusionCompositeOp:   return("smud");
    case HardLightCompositeOp:   return("hLit");
    case HardMixCompositeOp:     return("hMix");
    case HueCompositeOp:         return("hue ");
    case LightenCompositeOp:     return("lite");
    case LinearBurnCompositeOp:  return("lbrn");
    case LinearDodgeCompositeOp: return("lddg");
    case LinearLightCompositeOp: return("lLit");
    case LuminizeCompositeOp:    return("lum ");
    case MultiplyCompositeOp:    return("mul ");
    case OverCompositeOp:        return("norm");
    case OverlayCompositeOp:     return("over");
    case PinLightCompositeOp:    return("pLit");
    case SaturateCompositeOp:    return("sat ");
    case ScreenCompositeOp:      return("scrn");
    case SoftLightCompositeOp:   return("sLit");
    case VividLightCompositeOp:  return("vLit");
    default:                     return("norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  Undo Photoshop's white matting of semi-transparent sRGB pixels unless the
  user disabled it via the "psd:alpha-unblend" option.  Returns MagickTrue
  on success (or when nothing needed to be done).
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
/* Only blended sRGB images were matted against white; skip anything else. */
if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace)
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
/* A failed row marks status; later rows bail out early. */
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
register ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image, q);
/* Fully transparent or fully opaque pixels need no correction. */
if (gamma != 0.0 && gamma != 1.0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
/* Invert c = a*c' + (1-a)*white for every non-alpha channel. */
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Translate a PSD compression tag into the closest MagickCore
  CompressionType.
*/
static inline CompressionType ConvertPSDCompression(
PSDCompressionType compression)
{
  if (compression == RLE)
    return(RLECompression);
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return(ZipCompression);
  return(NoCompression);
}
/*
  Scale every pixel's alpha by the layer opacity (revert==MagickFalse),
  or divide it back out (revert!=MagickFalse).  Returns MagickTrue on
  success; opaque layers are left untouched.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
/* The image gains partial transparency once opacity is applied. */
image->alpha_trait=BlendPixelTrait;
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
/* forward: alpha *= opacity/QuantumRange; revert: alpha /= opacity. */
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Composite a layer mask over the layer's alpha channel: each pixel's alpha
  is scaled by the mask intensity (or divided back out when revert is set).
  The mask is first expanded onto a canvas-sized clone filled with the mask
  background color so mask and layer geometries line up.

  Fix: the CloneImage() result was dereferenced without a NULL check,
  crashing on allocation failure.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=background;
  SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
  image->alpha_trait=BlendPixelTrait;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      /* forward: alpha *= intensity/QuantumRange; revert: divide it out. */
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Stash the layer's opacity mask in the image registry under a random key
  so the writer can re-emit it; the key is recorded in the layer's
  "psd:opacity-mask" artifact.  The key's ninth byte encodes the mask
  background.

  Fix: the key buffer was requested with GetRandomKey(random_info,2+1)
  (three bytes) while key[8] and key[9] are written below -- a heap buffer
  overflow.  Request ten bytes so both writes stay in bounds.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=layer_info->mask.background;
  key[9]='\0';
  /* Convert mask-relative coordinates to canvas coordinates. */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decode PackBits-style RLE channel data.  A control byte below 128 starts
  a literal run of (byte+1) source bytes, a byte above 128 repeats the next
  source byte (256-byte+1) times, and 128 is a no-op.  Depths 1, 2 and 4
  expand each source byte into 8, 4 or 2 output samples; any other depth
  copies bytes through unchanged.  Returns the number of output bytes
  produced (bounded by number_pixels).
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
#define TakeCompactByte \
  if (remaining == 0) \
    return(decoded); \
  remaining--
#define ReserveOutput(n) \
  if (((ssize_t) decoded + n) > (ssize_t) number_pixels) \
    return(decoded); \
  decoded+=n

  int
    byte;

  size_t
    run;

  ssize_t
    decoded,
    k,
    remaining;

  remaining=(ssize_t) number_compact_pixels;
  decoded=0;
  while ((remaining > 1) && (decoded < (ssize_t) number_pixels))
  {
    remaining--;
    run=(size_t) (*compact_pixels++);
    if (run == 128)
      continue;  /* no-op marker */
    if (run > 128)
      {
        /* Replicate run: one source byte repeated (256-run+1) times. */
        run=256-run+1;
        TakeCompactByte;
        byte=(*compact_pixels++);
        for (k=0; k < (ssize_t) run; k++)
        {
          switch (depth)
          {
            case 1:
            {
              ReserveOutput(8);
              *pixels++=(byte >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(byte >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(byte >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(byte >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(byte >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(byte >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(byte >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(byte >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              ReserveOutput(4);
              *pixels++=(unsigned char) ((byte >> 6) & 0x03);
              *pixels++=(unsigned char) ((byte >> 4) & 0x03);
              *pixels++=(unsigned char) ((byte >> 2) & 0x03);
              *pixels++=(unsigned char) ((byte & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              ReserveOutput(2);
              *pixels++=(unsigned char) ((byte >> 4) & 0xff);
              *pixels++=(unsigned char) ((byte & 0x0f) & 0xff);
              break;
            }
            default:
            {
              ReserveOutput(1);
              *pixels++=(unsigned char) byte;
              break;
            }
          }
        }
        continue;
      }
    /* Literal run of (run+1) source bytes. */
    run++;
    for (k=0; k < (ssize_t) run; k++)
    {
      TakeCompactByte;
      switch (depth)
      {
        case 1:
        {
          ReserveOutput(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          ReserveOutput(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          ReserveOutput(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          ReserveOutput(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
#undef TakeCompactByte
#undef ReserveOutput
  return(decoded);
}
/*
  Release every image and string resource held by an array of LayerInfo
  structures, then free the array itself.  Always returns NULL.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
const ssize_t number_layers)
{
  ssize_t
    j;

  for (j=0; j < number_layers; j++)
  {
    LayerInfo
      *layer;

    layer=layer_info+j;
    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return((LayerInfo *) RelinquishMagickMemory(layer_info));
}
/*
  Bytes per sample in the PSD channel data: two for 16-bit depths (or
  colormaps with more than 256 entries), otherwise one.
*/
static inline size_t GetPSDPacketSize(Image *image)
{
  if (image->storage_class == PseudoClass)
    {
      if ((image->colors > 256) || (image->depth > 8))
        return(2);
      return(1);
    }
  return(image->depth > 8 ? 2 : 1);
}
/*
  Read a section length from the blob: 32 bits for PSD (version 1),
  64 bits for PSB (version 2).
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
/*
  Bytes per uncompressed scanline; 1-bit data packs eight samples per byte.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  samples=image->columns;
  if (image->depth == 1)
    samples=(image->columns+7)/8;
  return(samples*GetPSDPacketSize(image));
}
/*
  Human-readable name of a PSD color mode, for logging.
*/
static const char *ModeToString(PSDImageType type)
{
  const char
    *mode;

  switch (type)
  {
    case BitmapMode: mode="Bitmap"; break;
    case GrayscaleMode: mode="Grayscale"; break;
    case IndexedMode: mode="Indexed"; break;
    case RGBMode: mode="RGB"; break;
    case CMYKMode: mode="CMYK"; break;
    case MultichannelMode: mode="Multichannel"; break;
    case DuotoneMode: mode="Duotone"; break;
    case LabMode: mode="L*A*B"; break;
    default: mode="unknown"; break;
  }
  return(mode);
}
/*
  Negate every channel except alpha; used to convert between PSD's and
  MagickCore's CMYK sample conventions.
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  /* Exclude alpha from the negation, then restore the original mask. */
  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
/*
  Walk the 8BIM image-resource blocks: store the whole section as the
  "8bim" profile, extract resolution (id 0x03ed) and the merged-image
  marker (id 0x0421).

  Fix: BlobToStringInfo() may return NULL on allocation failure; the
  result was passed straight to SetStringInfoDatum(), a NULL dereference.
*/
static void ParseImageResourceBlocks(Image *image,
const unsigned char *blocks,size_t length,
MagickBooleanType *has_merged_image,ExceptionInfo *exception)
{
  const unsigned char
    *p;

  StringInfo
    *profile;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return;
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  if (profile == (StringInfo *) NULL)
    return;
  SetStringInfoDatum(profile,blocks);
  (void) SetImageProfile(image,"8bim",profile,exception);
  profile=DestroyStringInfo(profile);
  for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* Reject blocks whose payload would run past the section. */
    if ((p+count) > (blocks+length-16))
      return;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MagickPathExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.x);
        (void) SetImageProperty(image,"tiff:XResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.y);
        (void) SetImageProperty(image,"tiff:YResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version info: byte 4 flags whether a merged image is present. */
        if (*(p+4) == 0)
          *has_merged_image=MagickFalse;
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Resource blocks are padded to even lengths. */
    if ((count & 0x01) != 0)
      p++;
  }
  return;
}
/*
  Map a four-character PSD blend-mode key onto the matching MagickCore
  composite operator; NULL or unknown keys fall back to OverCompositeOp.
*/
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  static const struct
  {
    const char
      key[5];

    CompositeOperator
      op;
  } blend_map[] =
  {
    { "norm", OverCompositeOp },
    { "mul ", MultiplyCompositeOp },
    { "diss", DissolveCompositeOp },
    { "diff", DifferenceCompositeOp },
    { "dark", DarkenCompositeOp },
    { "lite", LightenCompositeOp },
    { "hue ", HueCompositeOp },
    { "sat ", SaturateCompositeOp },
    { "colr", ColorizeCompositeOp },
    { "lum ", LuminizeCompositeOp },
    { "scrn", ScreenCompositeOp },
    { "over", OverlayCompositeOp },
    { "hLit", HardLightCompositeOp },
    { "sLit", SoftLightCompositeOp },
    { "smud", ExclusionCompositeOp },
    { "div ", ColorDodgeCompositeOp },
    { "idiv", ColorBurnCompositeOp },
    { "lbrn", LinearBurnCompositeOp },
    { "lddg", LinearDodgeCompositeOp },
    { "lLit", LinearLightCompositeOp },
    { "vLit", VividLightCompositeOp },
    { "pLit", PinLightCompositeOp },
    { "hMix", HardMixCompositeOp }
  };

  size_t
    j;

  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  for (j=0; j < (sizeof(blend_map)/sizeof(blend_map[0])); j++)
    if (LocaleNCompare(mode,blend_map[j].key,4) == 0)
      return(blend_map[j].op);
  return(OverCompositeOp);
}
/*
  Reverse `length` bytes of p in place unless the image blob is
  big-endian (MSB data is already in the stored order).
*/
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *head,
    *tail;

  if (image->endian == MSBEndian)
    return;
  head=p;
  tail=p+length-1;
  while (head < tail)
  {
    char
      c;

    c=(*head);
    *head++=(*tail);
    *tail--=c;
  }
}
/*
  Store one decoded sample into the pixel q.  For colormapped images the
  sample is a palette index; otherwise `type` selects the target channel:
  -1 alpha, -2 layer mask (written as gray), 0..2 red/green/blue, 3 black
  (CMYK) or alpha, 4 alpha.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
/* Colormapped: sample is an index; also resolve it to a color. */
if (packet_size == 1)
SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
else
SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
ConstrainColormapIndex(image,GetPixelIndex(image,q),exception),q);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(image, pixel,q);
break;
}
case -2:
case 0:
{
/* channel 0 is red; single-channel images and masks also set gray. */
SetPixelRed(image,pixel,q);
if (channels == 1 || type == -2)
SetPixelGray(image,pixel,q);
break;
}
case 1:
{
/* NOTE(review): PseudoClass returned above, so this branch looks
   unreachable -- confirm before removing. */
if (image->storage_class == PseudoClass)
SetPixelAlpha(image,pixel,q);
else
SetPixelGreen(image,pixel,q);
break;
}
case 2:
{
/* NOTE(review): same unreachable PseudoClass branch as case 1. */
if (image->storage_class == PseudoClass)
SetPixelAlpha(image,pixel,q);
else
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
/* Fourth channel: black for CMYK, otherwise alpha if present. */
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
/* Fifth channel: alpha, except for multichannel sRGB-like data. */
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
}
}
/*
  Unpack one decoded scanline into row `row` of the image.  Samples are
  one or two bytes (MSB-first) according to GetPSDPacketSize(); for 1-bit
  images each byte is expanded into eight pixels (set bit == black).
  Returns the SyncAuthenticPixels() status.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const size_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register Quantum
*q;
register ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Read one sample, scaled to the Quantum range. */
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
ssize_t
bit,
number_bits;
/* 1-bit data: expand up to eight packed bits into pixels. */
number_bits=image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
q+=GetPixelChannels(image);
x++;
}
/* The outer loop also increments x; compensate unless the row ended. */
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed channel: one blob read per scanline, unpacked by
  ReadPSDChannelPixels().  Returns MagickFalse on a short read or pixel
  failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    bytes_read,
    row_size;

  ssize_t
    y;

  unsigned char
    *scanline;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  scanline=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*scanline));
  if (scanline == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    bytes_read=ReadBlob(image,row_size,scanline);
    if (bytes_read != row_size)
      break;
    status=ReadPSDChannelPixels(image,channels,y,type,scanline,exception);
    if (status == MagickFalse)
      break;
  }
  scanline=(unsigned char *) RelinquishMagickMemory(scanline);
  return(status);
}
/*
  Read the per-scanline compressed-size table that precedes RLE channel
  data: 16-bit entries for PSD, 32-bit for PSB.  Returns NULL on
  allocation failure; caller frees the result.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    j;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return((MagickOffsetType *) NULL);
  for (j=0; j < (ssize_t) size; j++)
  {
    if (psd_info->version == 1)
      sizes[j]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[j]=(MagickOffsetType) ReadBlobLong(image);
  }
  return(sizes);
}
/*
  Read an RLE-compressed channel: each scanline's compressed bytes (length
  from `sizes`) are read into a scratch buffer, expanded by
  DecodePSDPixels() and unpacked by ReadPSDChannelPixels().

  Fixes: the compact-pixel buffer was allocated with sizeof(*pixels)
  instead of sizeof(*compact_pixels) (same size today, but wrong if either
  type changes), and the lone C++-style comment is now a C comment to
  match the rest of the file.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Size the scratch buffer for the longest compressed scanline.
  */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > row_size + 256) /* arbitrary slack; longer rows are corrupt */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For 1-bit images a bogus depth keeps the row packed; the bits are
      expanded later by ReadPSDChannelPixels().
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read a ZIP-compressed channel: inflate the whole channel into a
  rows*row_size buffer, undo per-row delta prediction when requested,
  then unpack one scanline at a time.

  Fixes: inflateInit() was never paired with inflateEnd(), leaking zlib's
  internal state on every call; the inflate loop could spin forever when
  the stream ended (Z_STREAM_END) before the output buffer was full; and
  the ReadBlob() result was ignored, inflating uninitialized memory on a
  truncated blob.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
const ssize_t type,const PSDCompressionType compression,
const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  ResetMagickMemory(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      return(MagickFalse);
    }
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo per-row delta prediction: each sample is a delta from its
        left neighbor (16-bit samples carry the delta across both bytes).
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read one channel of a layer, dispatching on the compression tag.  Mask
  channels (type < -1) are decoded into a separate cloned image stored in
  layer_info->mask.image; unsupported masks are skipped.  The blob is
  always repositioned to the end of the channel's data.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
const size_t channel,const PSDCompressionType compression,
ExceptionInfo *exception)
{
Image
*channel_image,
*mask;
MagickOffsetType
offset;
MagickBooleanType
status;
channel_image=image;
mask=(Image *) NULL;
if (layer_info->channel_info[channel].type < -1)
{
const char
*option;
/*
Ignore mask that is not a user supplied layer mask, if the mask is
disabled or if the flags have unsupported values.
*/
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if ((layer_info->channel_info[channel].type != -2) ||
(layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
(IsStringTrue(option) == MagickFalse)))
{
/* size includes the 2-byte compression tag already consumed. */
SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR);
return(MagickTrue);
}
/* Decode the mask into its own grayscale image. */
mask=CloneImage(image,layer_info->mask.page.width,
layer_info->mask.page.height,MagickFalse,exception);
if (mask != (Image *) NULL)
{
SetImageType(mask,GrayscaleType,exception);
channel_image=mask;
}
}
offset=TellBlob(image);
status=MagickTrue;
switch(compression)
{
case Raw:
status=ReadPSDChannelRaw(channel_image,psd_info->channels,
layer_info->channel_info[channel].type,exception);
break;
case RLE:
{
MagickOffsetType
*sizes;
sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ReadPSDChannelRLE(channel_image,psd_info,
layer_info->channel_info[channel].type,sizes,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
}
break;
case ZipWithPrediction:
case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
status=ReadPSDChannelZip(channel_image,layer_info->channels,
layer_info->channel_info[channel].type,compression,
layer_info->channel_info[channel].size-2,exception);
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (ZLIB)",image->filename);
#endif
break;
default:
(void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
"CompressionNotSupported","'%.20g'",(double) compression);
break;
}
/* Always leave the blob at the end of this channel's data. */
SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET);
if (status == MagickFalse)
{
if (mask != (Image *) NULL)
DestroyImage(mask);
ThrowBinaryException(CoderError,"UnableToDecompressImage",
image->filename);
}
layer_info->mask.image=mask;
return(status);
}
/*
  Decode one layer: set colorspace/compose from the PSD header and blend
  key, read every channel, apply the layer opacity and, when present, the
  layer mask.  Position, opacity and name are exposed as artifacts and
  the "label" property.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
char
message[MagickPathExtent];
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
j;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" setting up new layer image");
if (psd_info->mode != IndexedMode)
(void) SetImageBackgroundColor(layer_info->image,exception);
layer_info->image->compose=PSDBlendModeToCompositeOperator(
layer_info->blendkey);
/* Hidden layers are excluded from compositing. */
if (layer_info->visible == MagickFalse)
layer_info->image->compose=NoCompositeOp;
if (psd_info->mode == CMYKMode)
SetImageColorspace(layer_info->image,CMYKColorspace,exception);
else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
(psd_info->mode == GrayscaleMode))
SetImageColorspace(layer_info->image,GRAYColorspace,exception);
/*
Set up some hidden attributes for folks that need them.
*/
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.x);
(void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.y);
(void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
layer_info->opacity);
(void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
(void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
exception);
status=MagickTrue;
for (j=0; j < (ssize_t) layer_info->channels; j++)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for channel %.20g",(double) j);
/* Each channel's data starts with its own 2-byte compression tag. */
compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
layer_info->image->compression=ConvertPSDCompression(compression);
if (layer_info->channel_info[j].type == -1)
layer_info->image->alpha_trait=BlendPixelTrait;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
compression,exception);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
MagickFalse,exception);
if ((status != MagickFalse) &&
(layer_info->image->colorspace == CMYKColorspace))
status=NegateCMYK(layer_info->image,exception);
if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
{
const char
*option;
layer_info->mask.image->page.x=layer_info->mask.page.x;
layer_info->mask.image->page.y=layer_info->mask.page.y;
/* Do not composite the mask when it is disabled */
if ((layer_info->mask.flags & 0x02) == 0x02)
layer_info->mask.image->compose=NoCompositeOp;
else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
exception);
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if (IsStringTrue(option) != MagickFalse)
PreservePSDOpacityMask(image,layer_info,exception);
layer_info->mask.image=DestroyImage(layer_info->mask.image);
}
return(status);
}
/*
  ReadPSDLayers() reads the "layer and mask information" section of a PSD
  or PSB stream: the layer records (bounding box, channel list, blend key,
  mask info, blending ranges, name, additional info), then the per-layer
  channel data, and links the decoded layers into the image list.

  Fixes relative to the previous revision: the two unsigned subtractions
  (length-18 for the mask section, size-combined_length for the additional
  info) are now guarded so corrupt files cannot make them wrap around.

  Returns MagickTrue on success (including "nothing to do"), MagickFalse
  on error (with an exception raised via ThrowBinaryException).
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks; a "Lr16" block may still carry 16-bit layers.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      ReversePSDString(image,type,4);
      status=MagickFalse;
      if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          count=ReadBlob(image,4,(unsigned char *) type);
          ReversePSDString(image,type,4);
          if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  status=MagickTrue;
  if (size != 0)
    {
      layer_info=(LayerInfo *) NULL;
      number_layers=(short) ReadBlobShort(image);
      if (number_layers < 0)
        {
          /*
            The first alpha channel in the merged result contains the
            transparency data for the merged result.
          */
          number_layers=MagickAbsoluteValue(number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " negative layer count corrected for");
          image->alpha_trait=BlendPixelTrait;
        }
      /*
        We only need to know if the image has an alpha channel
      */
      if (skip_layers != MagickFalse)
        return(MagickTrue);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image contains %.20g layers",(double) number_layers);
      if (number_layers == 0)
        ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
          image->filename);
      layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
        sizeof(*layer_info));
      if (layer_info == (LayerInfo *) NULL)
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of LayerInfo failed");
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      (void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
        sizeof(*layer_info));
      /*
        Pass 1: parse every layer record (no pixel data yet).
      */
      for (i=0; i < number_layers; i++)
      {
        ssize_t
          x,
          y;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " reading layer #%.20g",(double) i+1);
        /*
          Bounding box is stored top,left,bottom,right.
        */
        layer_info[i].page.y=ReadBlobSignedLong(image);
        layer_info[i].page.x=ReadBlobSignedLong(image);
        y=ReadBlobSignedLong(image);
        x=ReadBlobSignedLong(image);
        layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
        layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
        layer_info[i].channels=ReadBlobShort(image);
        if (layer_info[i].channels > MaxPSDChannels)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
              image->filename);
          }
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
            (double) layer_info[i].page.x,(double) layer_info[i].page.y,
            (double) layer_info[i].page.height,(double)
            layer_info[i].page.width,(double) layer_info[i].channels);
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
          layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
            image);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
              (double) layer_info[i].channel_info[j].type,
              (double) layer_info[i].channel_info[j].size);
        }
        count=ReadBlob(image,4,(unsigned char *) type);
        ReversePSDString(image,type,4);
        if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer type was %.4s instead of 8BIM", type);
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
        ReversePSDString(image,layer_info[i].blendkey,4);
        layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
          ReadBlobByte(image));
        layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
        layer_info[i].flags=(unsigned char) ReadBlobByte(image);
        /* flag bit 1 set means "hidden" */
        layer_info[i].visible=!(layer_info[i].flags & 0x02);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
            layer_info[i].blendkey,(double) layer_info[i].opacity,
            layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
            layer_info[i].visible ? "true" : "false");
        (void) ReadBlobByte(image); /* filler */
        size=ReadBlobLong(image);
        if (size != 0)
          {
            MagickSizeType
              combined_length,
              length;

            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer contains additional info");
            length=ReadBlobLong(image);
            combined_length=length+4;
            if (length != 0)
              {
                /*
                  Layer mask info.
                */
                layer_info[i].mask.page.y=ReadBlobSignedLong(image);
                layer_info[i].mask.page.x=ReadBlobSignedLong(image);
                layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)-
                  layer_info[i].mask.page.y);
                layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)-
                  layer_info[i].mask.page.x);
                layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                  image);
                layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
                if (!(layer_info[i].mask.flags & 0x01))
                  {
                    /*
                      Bit 0 ("position relative to layer") is clear: rebase
                      the mask offset onto the layer origin.
                    */
                    layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                      layer_info[i].page.y;
                    layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                      layer_info[i].page.x;
                  }
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                    (double) layer_info[i].mask.page.x,(double)
                    layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
                    (double) layer_info[i].mask.page.height,(double)
                    ((MagickOffsetType) length)-18);
                /*
                  Skip over the rest of the layer mask information.  The 18
                  bytes read above are the minimum valid size; a smaller
                  declared length is corrupt and previously made the
                  unsigned subtraction below wrap to a huge value.
                */
                if (length < 18)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "ImproperImageHeader",image->filename);
                  }
                if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            length=ReadBlobLong(image);
            combined_length+=length+4;
            if (length != 0)
              {
                /*
                  Layer blending ranges info.
                */
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer blending ranges: length=%.20g",(double)
                    ((MagickOffsetType) length));
                /*
                  We read it, but don't use it...
                */
                for (j=0; j < (ssize_t) length; j+=8)
                {
                  size_t blend_source=ReadBlobLong(image);
                  size_t blend_dest=ReadBlobLong(image);
                  if (image->debug != MagickFalse)
                    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                      " source(%x), dest(%x)",(unsigned int)
                      blend_source,(unsigned int) blend_dest);
                }
              }
            /*
              Layer name (Pascal string, padded to a multiple of 4).
            */
            length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
            combined_length+=length+1;
            if (length > 0)
              (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
            layer_info[i].name[length]='\0';
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer name: %s",layer_info[i].name);
            if ((length % 4) != 0)
              {
                length=4-(length % 4);
                combined_length+=length;
                /* Skip over the padding of the layer name */
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /*
              Whatever remains of this record is opaque additional layer
              information.  Guard the unsigned subtraction: a corrupt file
              whose declared size is smaller than the bytes already
              consumed previously wrapped to a huge length here.
            */
            if (size > combined_length)
              length=(MagickSizeType) (size-combined_length);
            else
              length=0;
            if (length > 0)
              {
                unsigned char
                  *info;

                layer_info[i].info=AcquireStringInfo((const size_t) length);
                info=GetStringInfoDatum(layer_info[i].info);
                (void) ReadBlob(image,(const size_t) length,info);
              }
          }
      }
      /*
        Pass 2: allocate a canvas per non-empty layer and attach the
        additional-info profile.
      */
      for (i=0; i < number_layers; i++)
      {
        if ((layer_info[i].page.width == 0) ||
            (layer_info[i].page.height == 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer data is empty");
            if (layer_info[i].info != (StringInfo *) NULL)
              layer_info[i].info=DestroyStringInfo(layer_info[i].info);
            continue;
          }
        /*
          Allocate layered image.
        */
        layer_info[i].image=CloneImage(image,layer_info[i].page.width,
          layer_info[i].page.height,MagickFalse,exception);
        if (layer_info[i].image == (Image *) NULL)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " allocation of image for layer %.20g failed",(double) i);
            ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
              image->filename);
          }
        if (layer_info[i].info != (StringInfo *) NULL)
          {
            (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
              layer_info[i].info,exception);
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          }
      }
      /*
        Pass 3: read (or skip, for empty layers) the channel pixel data.
      */
      if (image_info->ping == MagickFalse)
        {
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=0; j < layer_info[i].channels; j++)
                {
                  if (DiscardBlobBytes(image,(MagickSizeType)
                      layer_info[i].channel_info[j].size) == MagickFalse)
                    {
                      layer_info=DestroyLayerInfo(layer_info,number_layers);
                      ThrowBinaryException(CorruptImageError,
                        "UnexpectedEndOfFile",image->filename);
                    }
                }
                continue;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " reading data for layer %.20g",(double) i);
            status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
              exception);
            if (status == MagickFalse)
              break;
            status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
              number_layers);
            if (status == MagickFalse)
              break;
          }
        }
      if (status != MagickFalse)
        {
          /*
            Drop empty entries, then link the surviving layers into the
            image list behind the base image.
          */
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=i; j < number_layers - 1; j++)
                  layer_info[j] = layer_info[j+1];
                number_layers--;
                i--;
              }
          }
          if (number_layers > 0)
            {
              for (i=0; i < number_layers; i++)
              {
                if (i > 0)
                  layer_info[i].image->previous=layer_info[i-1].image;
                if (i < (number_layers-1))
                  layer_info[i].image->next=layer_info[i+1].image;
                layer_info[i].image->page=layer_info[i].page;
              }
              image->next=layer_info[0].image;
              layer_info[0].image->previous=image;
            }
          layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}
/*
  ReadPSDMergedImage() reads the flattened composite image that follows the
  layer section.  One compression marker applies to every channel; only Raw
  and RLE compression are supported here.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickOffsetType
    *sizes;

  PSDCompressionType
    compression;

  register ssize_t
    channel;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if ((compression != Raw) && (compression != RLE))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /*
        RLE data is preceded by a byte-count table: one entry per row for
        every channel.
      */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (channel=0; channel < (ssize_t) psd_info->channels; channel++)
  {
    status=(compression == RLE) ?
      ReadPSDChannelRLE(image,psd_info,channel,sizes+(channel*image->rows),
        exception) :
      ReadPSDChannelRaw(image,psd_info->channels,channel,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,channel,psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  ReadPSDImage() reads an Adobe Photoshop (PSD, version 1) or Large
  Document (PSB, version 2) stream: header, colormap, image resource
  blocks, the layer/mask section, and finally the precombined (merged)
  image.  Returns the first image in the decoded list, or NULL on error.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  ssize_t
    count;

  unsigned char
    *data;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.  PSD is big-endian throughout; signature must be
    "8BPS" with version 1 (PSD) or 2 (PSB).
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* version-1 PSD caps dimensions at 30000 pixels per side */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  /*
    Initialize image: colorspace/alpha follow the PSD color mode.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  if (psd_info.mode == LabMode)
    SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      SetImageColorspace(image,CMYKColorspace,exception);
      /* a 5th channel beyond CMYK is alpha */
      if (psd_info.channels > 4)
        SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
    }
  else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
      (psd_info.mode == DuotoneMode))
    {
      status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536,
        exception);
      if (status == MagickFalse)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " Image colormap allocated");
      SetImageColorspace(image,GRAYColorspace,exception);
      if (psd_info.channels > 1)
        SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
    }
  else
    if (psd_info.channels > 3)
      SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if (psd_info.mode == DuotoneMode)
        {
          /*
            Duotone image data; the format of this data is undocumented.
            Read and discard it.
          */
          data=(unsigned char *) AcquireQuantumMemory((size_t) length,
            sizeof(*data));
          if (data == (unsigned char *) NULL)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          (void) ReadBlob(image,(size_t) length,data);
          data=(unsigned char *) RelinquishMagickMemory(data);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap: all red bytes, then all green, then
            all blue (planar, not interleaved).
          */
          number_colors=length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  /* 1-bit images must have acquired a colormap above */
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block: "8BIM"-tagged records; parsing may clear
        has_merged_image if the file says no composite is stored.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image,
        exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /*
        NOTE(review): when the section size is exactly 8, two 32-bit
        lengths are read and the second overwrites the first — presumably
        skipping a nested length field; confirm against the PSD spec.
      */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  /* remember where the layer section starts so we can seek past it */
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayers(image,image_info,&psd_info,skip_layers,exception) !=
          MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (image_info->ping != MagickFalse)
    {
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  if ((has_merged_image != MagickFalse) || (GetImageListLength(image) == 1))
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /*
    No usable composite and no layers read yet: retry the layer section,
    this time without skipping.
  */
  if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) &&
      (length != 0))
    {
      SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayers(image,image_info,&psd_info,MagickFalse,exception);
      if (status != MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      /*
        No composite stored: synthesize one by flattening the layers onto
        a transparent canvas.
      */
      if (GetImageListLength(image) == 1)
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      SetImageAlphaChannel(image,TransparentAlphaChannel,exception);
      image->background_color.alpha=TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
/*
  UnregisterPSDImage() removes the PSB and PSD registrations added by
  RegisterPSDImage().
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetPSDOffset() writes a row byte-count table entry: 16 bits for PSD
  (version 1), 32 bits for PSB (version 2).  Returns the number of bytes
  written.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  /* cast to unsigned int, not unsigned short: the previous narrowing cast
     truncated PSB offsets above 65535 */
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}
/*
  WritePSDOffset() back-patches a row byte-count entry at `offset`, then
  restores the current write position.  16-bit for PSD (version 1), 32-bit
  for PSB (version 2).  Returns the number of bytes written.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    /* cast to unsigned int, not unsigned short: the previous narrowing
       cast truncated PSB sizes above 65535 */
    result=WriteBlobMSBLong(image,(unsigned int) size);
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}
/*
  SetPSDSize() writes a section-length field: 32 bits for PSD (version 1),
  64 bits for PSB.  Returns the number of bytes written.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version != 1)
    return(WriteBlobMSBLongLong(image,size));
  return(WriteBlobMSBLong(image,(unsigned int) size));
}
/*
  WritePSDSize() back-patches a section-length field at `offset` (32-bit
  for PSD, 64-bit for PSB), then restores the current write position.
  Returns the number of bytes written.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  PSDPackbitsEncodeImage() compresses `length` bytes from `pixels` into
  `compact_pixels` using Packbits run-length encoding and returns the
  number of compressed bytes produced (including the trailing EOD marker).
  `compact_pixels` must be large enough for the worst case — see
  AcquireCompactPixels().
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* scratch buffer for one literal run: 1 header byte + up to 127 literals */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the input bytes still to encode */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* single trailing byte: literal run of length 1 */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* two trailing bytes: literal run of length 2 */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        /* three trailing bytes: packed run if all equal, else literal */
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            /* header byte 257-count encodes a repeat of `count` bytes */
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.  NOTE(review): the lookahead below inspects
          pixels[count+1] and pixels[count+2]; the (i-3) bound appears to
          keep it within range, but confirm it cannot read past the end of
          the input buffer for pathological inputs.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
/*
  WriteCompressionStart() emits the compression marker for the channel data
  that follows.  For RLE it also reserves the per-row byte-count table
  (one zeroed entry per row per channel) that is back-patched later via
  WritePSDOffset().  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  if (next_image->compression == RLECompression)
    {
      count=WriteBlobMSBShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    return(WriteBlobMSBShort(image,ZipWithoutPrediction));
#endif
  return(WriteBlobMSBShort(image,Raw));
}
/*
  WritePSDChannel() writes one channel of `next_image` to the blob using
  the image's compression (Raw, RLE, or — when zlib is available — Zip).
  When `separate` is set the channel carries its own compression marker
  and RLE size table.  Returns the number of bytes written, or 0 on error.

  Fix: the zlib scratch buffer was leaked when deflateInit() failed; the
  row counter is now ssize_t to match the (ssize_t) rows comparisons.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* per-channel compression marker; remember where its RLE size table
         starts so row lengths can be back-patched */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      ResetMagickMemory(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          /* fix: this buffer leaked on the deflateInit failure path */
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      /* PSD stores 1-bit data with inverted polarity */
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* back-patch this row's compressed byte count */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (next_image->compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
            stream.avail_out=(uInt) CHUNK;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) CHUNK-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
/*
  AcquireCompactPixels() allocates the scratch buffer used for Packbits
  output, sized generously so the encoder cannot overrun even for
  incompressible rows.  Returns NULL (with an exception raised) on
  allocation failure.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=(image->depth > 8UL) ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
/*
  WritePSDChannels() writes every channel of `next_image` in PSD channel
  order (index, or gray, or R/G/B[/K], then alpha).  When `separate` is
  set each channel gets its own compression marker and its byte count is
  back-patched at `size_offset`; otherwise a single marker covers all
  channels and `rows_offset` walks the shared RLE row table.  Returns the
  number of bytes written, or 0 on error.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      /* scratch buffer shared by every RLE-encoded channel below */
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* interleaved mode: one marker covers all channels; count them */
      if (next_image->storage_class != PseudoClass)
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=next_image->colorspace == CMYKColorspace ? 4 : 3;
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,channels);
      /* size of one channel's slice of the shared RLE row table */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  /* skip the 2-byte compression marker when back-patching sizes */
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      /* colormapped image: a single index channel */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; flip before writing, restored below */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* undo the CMYK negation performed before writing */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /*
        A preserved opacity mask (stashed in the image registry by the
        reader) is written as one extra separate channel.
      */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  register ssize_t
    i;

  size_t
    bytes_written,
    text_length;

  /*
    Write a Pascal-style string: one length byte followed by the text,
    then zero-padded so the total (length byte included) is a multiple of
    `padding`.  The text is truncated to 255 bytes, the most a single
    length byte can describe.
  */
  text_length=strlen(value);
  if (text_length > 255UL)
    text_length=255UL;
  bytes_written=0;
  if (text_length == 0)
    bytes_written+=WriteBlobByte(image,0);
  else
    {
      bytes_written+=WriteBlobByte(image,(unsigned char) text_length);
      bytes_written+=WriteBlob(image,text_length,(const unsigned char *) value);
    }
  /* account for the length byte when computing the pad */
  text_length++;
  if ((text_length % padding) == 0)
    return(bytes_written);
  for (i=0; i < (ssize_t) (padding-(text_length % padding)); i++)
    bytes_written+=WriteBlobByte(image,0);
  return(bytes_written);
}
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  /*
    Emit the 8BIM resolution image resource (id 0x03ED).  Resolutions are
    stored as 32-bit fixed-point (16.16) values in pixels per inch.
  */
  if (image->units == PixelsPerCentimeterResolution)
    {
      /* convert pixels/cm to pixels/inch before scaling to fixed point */
      x_resolution=2.54*65536.0*image->resolution.x;
      y_resolution=2.54*65536.0*image->resolution.y;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x;
      y_resolution=65536.0*image->resolution.y;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /*
    Round to nearest exactly once here.  The original also added 0.5 when
    computing x_resolution/y_resolution above, so exact inputs (e.g.
    72 DPI) overshot the fixed-point value by one.
  */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    bytes;

  /*
    Write one channel descriptor: the signed channel id followed by a
    zero placeholder for the channel data length (patched in later once
    the data has been written).  The two writes must stay in this order.
  */
  bytes=WriteBlobMSBSignedShort(image,channel);
  bytes+=SetPSDSize(psd_info,image,0);
  return(bytes);
}
/*
  Remove the ICC profile resource (id 0x040F) from an 8BIM resource block
  so the profile is not emitted twice (it is written separately).  The
  block is compacted in place and its StringInfo length shrunk.  Each
  resource has a 12-byte header: "8BIM" (4) + id (2) + pad (2) + size (4).
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* too small to hold even one resource header safely */
  datum=GetStringInfoDatum(bim_profile);
  /* walk the resources; the -16 margin keeps header reads in bounds */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    /* q marks the start of this resource, for the compaction below */
    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* total footprint: header (12) plus even-padded payload */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* slide the remaining resources over the removed one */
            if ((q+quantum < (datum+length-16)))
              (void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* resource payloads are padded to an even byte count */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Remove the resolution resource (id 0x03ED) from an 8BIM resource block;
  the writer emits its own resolution block (WriteResolutionResourceBlock),
  so a stale copy from the input profile must not survive.  Same in-place
  compaction scheme as RemoveICCProfileFromResourceBlock.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* nothing parseable */
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* even-padded payload size; negative means a corrupt count */
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)))
      {
        /* compact over header (12) + payload and shrink the profile */
        (void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Filter the "psd:additional-info" profile according to the
  "psd:additional-info" image option:
    - "all":        return the profile untouched;
    - "selective":  keep only resources whose 4-byte key is whitelisted,
                    compacting the buffer in place and shrinking it;
    - otherwise:    drop the profile entirely (returns NULL via the
                    destroyed profile).
  NOTE(review): the removed profile is returned to the caller, which
  appears to write it without destroying it — presumably the caller takes
  ownership; verify for leaks.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* neither "all" nor "selective": discard the profile */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  /* `length` now accumulates the size of the kept resources */
  remaining_length=length;
  length=0;
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(*p++);
    key[1]=(*p++);
    key[2]=(*p++);
    key[3]=(*p++);
    key[4]='\0';
    /* big-endian 32-bit payload size */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);  /* truncated/corrupt resource */
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* not whitelisted: compact the tail over this resource; p stays
           put so the moved-in data is parsed next */
        if (remaining_length > 0)
          p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* shrink to the kept resources (info aliases the removed profile) */
  SetStringInfoLength(profile,(const size_t) length);
  SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  Write `image` (and any following frames as layers) as an Adobe PSD/PSB
  file: file header, mode data/colormap, image resources, layer records,
  per-layer channel data, and finally the merged composite.  Returns
  MagickTrue on success.  Statement order mirrors the on-disk format and
  must not be rearranged.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *icc_profile,
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    num_channels,
    packet_size,
    rounded_size,
    size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* bytes per pixel packet: 3 or 6 for color, +1/+2 with alpha */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* PSB (version 2) is required beyond 30000 pixels per side */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* pick the channel count from the effective color model */
  if (SetImageGray(image,exception) != MagickFalse)
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          /* anything not explicitly CMYK is written as sRGB/indexed */
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap (256 entries per channel, zero padded).
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(
          image->colormap[i].green));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* clone so resources can be stripped without touching the source */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* emit the ICC profile as its own 8BIM resource (id 0x040F) */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      /* pad odd-length profiles to an even byte count */
      if ((MagickOffsetType) GetStringInfoLength(icc_profile) !=
          PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* two zero placeholders: layer+mask section size and layer info size,
     both patched once the real sizes are known */
  SetPSDSize(&psd_info,image,0);
  SetPSDSize(&psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* a negative layer count flags merged transparency in the composite */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  /* first pass: one layer record per frame (channel data follows later) */
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        /* opacity mask registered under the artifact's registry key */
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=strlen(property) == 9 ? 255 : 0;
      }
    /* layer bounding box: top, left, bottom, right */
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
      next_image->columns));
    channels=1U;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=next_image->colorspace == CMYKColorspace ? 4U : 3U;
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobMSBShort(image,total_channels);
    /* remember where the per-channel size placeholders live */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(&psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(&psd_info,image,-1);  /* -1 = alpha channel */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(&psd_info,image,-2);  /* -2 = user mask */
    size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
    size+=WriteBlob(image,4,(const unsigned char *)
      CompositeOperatorToPSDBlendMode(next_image->compose));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ?
      1 << 0x02 : 1); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* unlabeled layers get a synthetic "L<n>" name */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* extra-data length: padded name + optional info + mask record */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobMSBLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobMSBLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobMSBLong(image,20);
        size+=WriteBlobMSBSignedLong(image,mask->page.y);
        size+=WriteBlobMSBSignedLong(image,mask->page.x);
        size+=WriteBlobMSBLong(image,(const unsigned int) mask->rows+
          mask->page.y);
        size+=WriteBlobMSBLong(image,(const unsigned int) mask->columns+
          mask->page.x);
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0);
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobMSBLong(image,0);
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(&psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  (void) WriteBlobMSBLong(image,0); /* user mask data */
  /*
    Write the total size
  */
  size_offset+=WritePSDSize(&psd_info,image,size+
    (psd_info.version == 1 ? 8 : 16),size_offset);
  /* layer info size must be even */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      /* zip is not valid for the composite; fall back to RLE */
      compression=image->compression;
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
attention.c | #include "darknet.h"
#include <sys/time.h>
#include <assert.h>
/*
  Append n columns, each initialized to val, to every row of the truth
  matrix d->y, then grow the recorded column count to match.
*/
void extend_data_truth(data* d, int n, float val)
{
    int i, j;
    for (i = 0; i < d->y.rows; ++i)
    {
        /* fix: check realloc instead of assigning a possible NULL back
           into the row table (which would also leak the old row) */
        float* row = (float*)realloc(d->y.vals[i], (d->y.cols + n) * sizeof(float));
        assert(row != NULL);
        d->y.vals[i] = row;
        for (j = 0; j < n; ++j)
        {
            row[d->y.cols + j] = val;
        }
    }
    d->y.cols += n;
}
/*
  Run `net` forward over `test` in inference mode and return a
  (test.X.rows x 1) matrix whose entries are the negated sum of the final
  layer's outputs for each sample — used as a per-sample loss score.
  The network's input/truth/train/delta fields are restored afterwards.
*/
matrix network_loss_data(network* net, data test)
{
    int i, b;
    int k = 1;
    matrix pred = make_matrix(test.X.rows, k);
    float* X = (float*)calloc(net->batch * test.X.cols, sizeof(float));
    float* y = (float*)calloc(net->batch * test.y.cols, sizeof(float));
    /* fix: the original dereferenced these without checking */
    assert(X != NULL && y != NULL);
    for (i = 0; i < test.X.rows; i += net->batch)
    {
        /* pack up to net->batch samples; the final batch may be short */
        for (b = 0; b < net->batch; ++b)
        {
            if (i + b == test.X.rows)
                break;
            memcpy(X + b * test.X.cols, test.X.vals[i + b], test.X.cols * sizeof(float));
            memcpy(y + b * test.y.cols, test.y.vals[i + b], test.y.cols * sizeof(float));
        }
        /* forward pass without training; restore the network state after */
        network orig = *net;
        net->input = X;
        net->truth = y;
        net->train = 0;
        net->delta = 0;
        forward_network(net);
        *net = orig;
        float* delta = net->layers[net->n - 1].output;
        for (b = 0; b < net->batch; ++b)
        {
            if (i + b == test.X.rows)
                break;
            /* fix: dropped the unused `t = max_index(...)` computation;
               it only served the commented-out alternative below */
            float err = sum_array(delta + b * net->outputs, net->outputs);
            pred.vals[i + b][0] = -err;
            // alternative (needs t = max_index(y + b*test.y.cols, 1000)):
            // pred.vals[i+b][0] = 1-delta[b*net->outputs + t];
        }
    }
    free(X);
    free(y);
    return pred;
}
/*
  Train an attention classifier: each loaded batch is tiled into
  divs*divs crops, each tile is scored by per-sample loss, the scores are
  appended to the resized batch's truth as an attention target, and the
  network is trained on both the best tile and the resized batch.
  Supports multi-GPU training when compiled with GPU.
*/
void train_attention(char* datacfg, char* cfgfile, char* weightfile, int* gpus, int ngpus, int clear)
{
    int i, j;
    float avg_cls_loss = -1;
    float avg_att_loss = -1;
    char* base = basecfg(cfgfile);
    printf("%s\n", base);
    printf("%d\n", ngpus);
    network** nets = calloc(ngpus, sizeof(network*));
    srand(time(0));
    int seed = rand();
    for (i = 0; i < ngpus; ++i)
    {
        /* same seed per replica so weights initialize identically */
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network* net = nets[0];
    int imgs = net->batch * net->subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    list* options = read_data_cfg(datacfg);
    char* backup_directory = option_find_str(options, "backup", "/backup/");
    char* label_list = option_find_str(options, "labels", "data/labels.list");
    char* train_list = option_find_str(options, "train", "data/train.list");
    int classes = option_find_int(options, "classes", 2);
    char** labels = get_labels(label_list);
    list* plist = get_paths(train_list);
    char** paths = (char**)list_to_array(plist);
    printf("%d\n", plist->size);
    int N = plist->size;
    double time;
    int divs = 3;
    int size = 2;
    /* asynchronous loader: crops are divs/size times the network input */
    load_args args = { 0 };
    args.w = divs * net->w / size;
    args.h = divs * net->h / size;
    args.size = divs * net->w / size;
    args.threads = 32;
    args.hierarchy = net->hierarchy;
    args.min = net->min_ratio * args.w;
    args.max = net->max_ratio * args.w;
    args.angle = net->angle;
    args.aspect = net->aspect;
    args.exposure = net->exposure;
    args.saturation = net->saturation;
    args.hue = net->hue;
    args.paths = paths;
    args.classes = classes;
    args.n = imgs;
    args.m = N;
    args.labels = labels;
    args.type = CLASSIFICATION_DATA;
    data train;
    data buffer;
    pthread_t load_thread;
    args.d = &buffer;
    load_thread = load_data(args);
    int epoch = (*net->seen) / N;
    while (get_current_batch(net) < net->max_batches || net->max_batches == 0)
    {
        time = what_time_is_it_now();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);
        data resized = resize_data(train, net->w, net->h);
        /* reserve divs*divs extra truth columns for attention scores */
        extend_data_truth(&resized, divs * divs, 0);
        data* tiles = tile_data(train, divs, size);
        printf("Loaded: %lf seconds\n", what_time_is_it_now() - time);
        time = what_time_is_it_now();
        float aloss = 0;
        float closs = 0;
        int z;
        for (i = 0; i < divs * divs / ngpus; ++i)
        {
#pragma omp parallel for
            for (j = 0; j < ngpus; ++j)
            {
                int index = i * ngpus + j;
                extend_data_truth(tiles + index, divs * divs, SECRET_NUM);
                matrix deltas = network_loss_data(nets[j], tiles[index]);
                /* fix: use a thread-local counter; the original reused the
                   shared `z`, a data race under the parallel for */
                int zz;
                for (zz = 0; zz < resized.y.rows; ++zz)
                {
                    resized.y.vals[zz][train.y.cols + index] = deltas.vals[zz][0];
                }
                free_matrix(deltas);
            }
        }
        /* one-hot the best (highest-scoring) tile per sample */
        int* inds = calloc(resized.y.rows, sizeof(int));
        for (z = 0; z < resized.y.rows; ++z)
        {
            int index = max_index(resized.y.vals[z] + train.y.cols, divs * divs);
            inds[z] = index;
            for (i = 0; i < divs * divs; ++i)
            {
                resized.y.vals[z][train.y.cols + i] = (i == index) ? 1 : 0;
            }
        }
        data best = select_data(tiles, inds);
        free(inds);
#ifdef GPU
        if (ngpus == 1)
        {
            closs = train_network(net, best);
        }
        else
        {
            closs = train_networks(nets, ngpus, best, 4);
        }
#endif
        for (i = 0; i < divs * divs; ++i)
        {
            printf("%.2f ", resized.y.vals[0][train.y.cols + i]);
            if ((i + 1) % divs == 0)
                printf("\n");
            free_data(tiles[i]);
        }
        free(tiles); /* fix: the tile array itself leaked every batch */
        free_data(best);
        printf("\n");
#ifdef GPU
        if (ngpus == 1)
        {
            aloss = train_network(net, resized);
        }
        else
        {
            aloss = train_networks(nets, ngpus, resized, 4);
        }
#endif
        for (i = 0; i < divs * divs; ++i)
        {
            printf("%f ", nets[0]->output[1000 + i]);
            if ((i + 1) % divs == 0)
                printf("\n");
        }
        printf("\n");
        free_data(resized);
        free_data(train);
        if (avg_cls_loss == -1)
            avg_cls_loss = closs;
        if (avg_att_loss == -1)
            avg_att_loss = aloss;
        /* exponential moving averages of the two losses */
        avg_cls_loss = avg_cls_loss * .9 + closs * .1;
        avg_att_loss = avg_att_loss * .9 + aloss * .1;
        printf("%ld, %.3f: Att: %f, %f avg, Class: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net),
               (float)(*net->seen) / N, aloss, avg_att_loss, closs, avg_cls_loss, get_current_rate(net),
               what_time_is_it_now() - time, *net->seen);
        if (*net->seen / N > epoch)
        {
            epoch = *net->seen / N;
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, epoch);
            save_weights(net, buff);
        }
        if (get_current_batch(net) % 1000 == 0)
        {
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
    }
    char buff[256];
    sprintf(buff, "%s/%s.weights", backup_directory, base);
    save_weights(net, buff);
    pthread_join(load_thread, 0);
    free_network(net);
    free(nets); /* fix: the replica pointer array leaked */
    free_ptrs((void**)labels, classes);
    free_ptrs((void**)paths, plist->size);
    free_list(plist);
    free(base);
}
/*
  Validate a classifier at a single (enlarged) scale: each image is
  center-cropped at divs/size times the network size, downscaled to the
  network input, predicted, and top-1/top-k accuracy is accumulated and
  printed per image.
*/
void validate_attention_single(char* datacfg, char* filename, char* weightfile)
{
    int i, j;
    network* net = load_network(filename, weightfile, 0);
    set_batch_network(net, 1);
    srand(time(0));
    list* options = read_data_cfg(datacfg);
    char* label_list = option_find_str(options, "labels", "data/labels.list");
    char* leaf_list = option_find_str(options, "leaves", 0);
    if (leaf_list)
        change_leaves(net->hierarchy, leaf_list);
    char* valid_list = option_find_str(options, "valid", "data/train.list");
    int classes = option_find_int(options, "classes", 2);
    int topk = option_find_int(options, "top", 1);
    char** labels = get_labels(label_list);
    list* plist = get_paths(valid_list);
    char** paths = (char**)list_to_array(plist);
    int m = plist->size;
    free_list(plist);
    float avg_acc = 0;
    float avg_topk = 0;
    int* indexes = calloc(topk, sizeof(int));
    int divs = 4;
    int size = 2;
    int extra = 0;
    float* avgs = calloc(classes, sizeof(float));
    int* inds = calloc(divs * divs, sizeof(int));
    for (i = 0; i < m; ++i)
    {
        /* derive the ground-truth class from the file path */
        int class = -1;
        char* path = paths[i];
        for (j = 0; j < classes; ++j)
        {
            if (strstr(path, labels[j]))
            {
                class = j;
                break;
            }
        }
        image im = load_image_color(paths[i], 0, 0);
        image resized = resize_min(im, net->w * divs / size);
        image crop = crop_image(resized, (resized.w - net->w * divs / size) / 2, (resized.h - net->h * divs / size) / 2,
                                net->w * divs / size, net->h * divs / size);
        image rcrop = resize_image(crop, net->w, net->h);
        float* pred = network_predict(net, rcrop.data);
        /* print the attention map appended after the class outputs */
        for (j = 0; j < divs * divs; ++j)
        {
            printf("%.2f ", pred[classes + j]);
            if ((j + 1) % divs == 0)
                printf("\n");
        }
        printf("\n");
        copy_cpu(classes, pred, 1, avgs, 1);
        top_k(pred + classes, divs * divs, divs * divs, inds);
        show_image(crop, "crop");
        /* optionally re-predict on the `extra` highest-attention tiles */
        for (j = 0; j < extra; ++j)
        {
            int index = inds[j];
            int row = index / divs;
            int col = index % divs;
            int y = row * crop.h / divs - (net->h - crop.h / divs) / 2;
            int x = col * crop.w / divs - (net->w - crop.w / divs) / 2;
            printf("%d %d %d %d\n", row, col, y, x);
            image tile = crop_image(crop, x, y, net->w, net->h);
            float* pred = network_predict(net, tile.data);
            axpy_cpu(classes, 1., pred, 1, avgs, 1);
            show_image(tile, "tile");
        }
        if (net->hierarchy)
            hierarchy_predictions(pred, net->outputs, net->hierarchy, 1, 1);
        if (rcrop.data != resized.data)
            free_image(rcrop);
        if (resized.data != im.data)
            free_image(resized);
        free_image(im);
        free_image(crop);
        top_k(pred, classes, topk, indexes);
        if (indexes[0] == class)
            avg_acc += 1;
        for (j = 0; j < topk; ++j)
        {
            if (indexes[j] == class)
                avg_topk += 1;
        }
        printf("%d: top 1: %f, top %d: %f\n", i, avg_acc / (i + 1), topk, avg_topk / (i + 1));
    }
    /* fix: these scratch buffers leaked in the original */
    free(indexes);
    free(avgs);
    free(inds);
}
/*
  Validate a classifier with multi-scale + horizontal-flip test-time
  augmentation: predictions over all scales and flips are summed before
  taking top-1/top-k accuracy.
*/
void validate_attention_multi(char* datacfg, char* filename, char* weightfile)
{
    int i, j;
    network* net = load_network(filename, weightfile, 0);
    set_batch_network(net, 1);
    srand(time(0));
    list* options = read_data_cfg(datacfg);
    char* label_list = option_find_str(options, "labels", "data/labels.list");
    char* valid_list = option_find_str(options, "valid", "data/train.list");
    int classes = option_find_int(options, "classes", 2);
    int topk = option_find_int(options, "top", 1);
    char** labels = get_labels(label_list);
    list* plist = get_paths(valid_list);
    int scales[] = { 224, 288, 320, 352, 384 };
    int nscales = sizeof(scales) / sizeof(scales[0]);
    char** paths = (char**)list_to_array(plist);
    int m = plist->size;
    free_list(plist);
    float avg_acc = 0;
    float avg_topk = 0;
    int* indexes = calloc(topk, sizeof(int));
    for (i = 0; i < m; ++i)
    {
        /* derive the ground-truth class from the file path */
        int class = -1;
        char* path = paths[i];
        for (j = 0; j < classes; ++j)
        {
            if (strstr(path, labels[j]))
            {
                class = j;
                break;
            }
        }
        float* pred = calloc(classes, sizeof(float));
        image im = load_image_color(paths[i], 0, 0);
        for (j = 0; j < nscales; ++j)
        {
            image r = resize_min(im, scales[j]);
            resize_network(net, r.w, r.h);
            float* p = network_predict(net, r.data);
            if (net->hierarchy)
                hierarchy_predictions(p, net->outputs, net->hierarchy, 1, 1);
            axpy_cpu(classes, 1, p, 1, pred, 1);
            /* also accumulate the mirrored image at this scale */
            flip_image(r);
            p = network_predict(net, r.data);
            axpy_cpu(classes, 1, p, 1, pred, 1);
            if (r.data != im.data)
                free_image(r);
        }
        free_image(im);
        top_k(pred, classes, topk, indexes);
        free(pred);
        if (indexes[0] == class)
            avg_acc += 1;
        for (j = 0; j < topk; ++j)
        {
            if (indexes[j] == class)
                avg_topk += 1;
        }
        printf("%d: top 1: %f, top %d: %f\n", i, avg_acc / (i + 1), topk, avg_topk / (i + 1));
    }
    free(indexes); /* fix: leaked in the original */
}
/*
  Interactive (or single-shot) prediction: load an image, letterbox it to
  the network input, predict, and print the `top` best class labels with
  their confidence.  When `filename` is NULL, image paths are read from
  stdin until EOF.
*/
void predict_attention(char* datacfg, char* cfgfile, char* weightfile, char* filename, int top)
{
    network* net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    list* options = read_data_cfg(datacfg);
    char* name_list = option_find_str(options, "names", 0);
    if (!name_list)
        name_list = option_find_str(options, "labels", "data/labels.list");
    if (top == 0)
        top = option_find_int(options, "top", 1);
    int i = 0;
    char** names = get_labels(name_list);
    clock_t time;
    int* indexes = calloc(top, sizeof(int));
    char buff[256];
    char* input = buff;
    while (1)
    {
        if (filename)
        {
            /* fix: strncpy(.., 256) left buff unterminated for long
               filenames; copy at most 255 bytes and terminate */
            strncpy(input, filename, 255);
            input[255] = '\0';
        }
        else
        {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if (!input)
                break; /* EOF: fall through to cleanup instead of leaking */
            strtok(input, "\n");
        }
        image im = load_image_color(input, 0, 0);
        image r = letterbox_image(im, net->w, net->h);
        float* X = r.data;
        time = clock();
        float* predictions = network_predict(net, X);
        if (net->hierarchy)
            hierarchy_predictions(predictions, net->outputs, net->hierarchy, 1, 1);
        top_k(predictions, net->outputs, top, indexes);
        fprintf(stderr, "%s: Predicted in %f seconds.\n", input, sec(clock() - time));
        for (i = 0; i < top; ++i)
        {
            int index = indexes[i];
            printf("%5.2f%%: %s\n", predictions[index] * 100, names[index]);
        }
        if (r.data != im.data)
            free_image(r);
        free_image(im);
        if (filename)
            break;
    }
    free(indexes); /* fix: leaked in the original */
}
/*
  CLI dispatcher for the attention sub-commands:
    attention predict|train|valid|validmulti <datacfg> <cfg> [weights] [file]
  Optional flags: -gpus <list>, -t <top>, -clear.
*/
void run_attention(int argc, char** argv)
{
    /* fix: every sub-command reads argv[4] (the cfg path), so five
       arguments are the real minimum; the original tested argc < 4 and
       passed a NULL cfg to load_network when argc == 4 */
    if (argc < 5)
    {
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char* gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    int ngpus;
    int* gpus = read_intlist(gpu_list, &ngpus, gpu_index);
    int top = find_int_arg(argc, argv, "-t", 0);
    int clear = find_arg(argc, argv, "-clear");
    char* data = argv[3];
    char* cfg = argv[4];
    char* weights = (argc > 5) ? argv[5] : 0;
    char* filename = (argc > 6) ? argv[6] : 0;
    /* (removed unused `layer_s` = argv[7]) */
    if (0 == strcmp(argv[2], "predict"))
        predict_attention(data, cfg, weights, filename, top);
    else if (0 == strcmp(argv[2], "train"))
        train_attention(data, cfg, weights, gpus, ngpus, clear);
    else if (0 == strcmp(argv[2], "valid"))
        validate_attention_single(data, cfg, weights);
    else if (0 == strcmp(argv[2], "validmulti"))
        validate_attention_multi(data, cfg, weights);
}
|
adjointnavierstokes.h | //*****************************************************************************
// Title : src/equation/adjointnavierstokes.h
// Author : Tanabe Yuta
// Date : 2021/08/03
// Copyright : (C)2021 TanabeYuta
//*****************************************************************************
#pragma once
#ifdef _USE_AVX_DEFINES
#include "../equation_avx/adjointnavierstokes_avx.h"
#endif
namespace PANSLBM2 {
namespace ANS {
// Accumulate the 2D adjoint macroscopic fields — adjoint pressure (_ip),
// adjoint velocity (_iux, _iuy) and adjoint momentum (_imx, _imy) —
// from the rest distribution _f0 and the streaming distributions _f at
// lattice site _idx.  P supplies the lattice (weights ei, velocities
// cx/cy, population count nc and the index map IndexF).
template<class T, template<class>class P>
void Macro(T &_ip, T &_iux, T &_iuy, T &_imx, T &_imy, T _rho, T _ux, T _uy, const T *_f0, const T *_f, int _idx) {
    T usq = _ux*_ux + _uy*_uy;
    _ip = _f0[_idx]*P<T>::ei[0]*(1.0 - 1.5*usq);
    _iux = -_f0[_idx]*P<T>::ei[0]*_ux;
    _iuy = -_f0[_idx]*P<T>::ei[0]*_uy;
    _imx = T();
    _imy = T();
    for (int q = 1; q < P<T>::nc; ++q) {
        T cdotu = P<T>::cx[q]*_ux + P<T>::cy[q]*_uy;
        T weighted = _f[P<T>::IndexF(_idx, q)]*P<T>::ei[q];
        _ip += weighted*(1.0 + 3.0*cdotu + 4.5*cdotu*cdotu - 1.5*usq);
        _iux += weighted*(P<T>::cx[q] + 3.0*cdotu*P<T>::cx[q] - _ux);
        _iuy += weighted*(P<T>::cy[q] + 3.0*cdotu*P<T>::cy[q] - _uy);
        _imx += weighted*P<T>::cx[q];
        _imy += weighted*P<T>::cy[q];
    }
}
// Accumulate the 3D adjoint macroscopic fields — adjoint pressure (_ip),
// adjoint velocity (_iux, _iuy, _iuz) and adjoint momentum
// (_imx, _imy, _imz) — from the distributions at lattice site _idx.
template<class T, template<class>class P>
void Macro(T &_ip, T &_iux, T &_iuy, T &_iuz, T &_imx, T &_imy, T &_imz, T _rho, T _ux, T _uy, T _uz, const T *_f0, const T *_f, int _idx) {
    T usq = _ux*_ux + _uy*_uy + _uz*_uz;
    _ip = _f0[_idx]*P<T>::ei[0]*(1.0 - 1.5*usq);
    _iux = -_f0[_idx]*P<T>::ei[0]*_ux;
    _iuy = -_f0[_idx]*P<T>::ei[0]*_uy;
    _iuz = -_f0[_idx]*P<T>::ei[0]*_uz;
    _imx = T();
    _imy = T();
    _imz = T();
    for (int q = 1; q < P<T>::nc; ++q) {
        T cdotu = P<T>::cx[q]*_ux + P<T>::cy[q]*_uy + P<T>::cz[q]*_uz;
        T weighted = _f[P<T>::IndexF(_idx, q)]*P<T>::ei[q];
        _ip += weighted*(1.0 + 3.0*cdotu + 4.5*cdotu*cdotu - 1.5*usq);
        _iux += weighted*(P<T>::cx[q] + 3.0*cdotu*P<T>::cx[q] - _ux);
        _iuy += weighted*(P<T>::cy[q] + 3.0*cdotu*P<T>::cy[q] - _uy);
        _iuz += weighted*(P<T>::cz[q] + 3.0*cdotu*P<T>::cz[q] - _uz);
        _imx += weighted*P<T>::cx[q];
        _imy += weighted*P<T>::cy[q];
        _imz += weighted*P<T>::cz[q];
    }
}
// Function of getting equilibrium of ANS for 2D
// Fills _feq[0..nc) with the adjoint equilibrium built from the adjoint
// moments (ip, iux, iuy) and the primal velocity (ux, uy).
template<class T, template<class>class P>
void Equilibrium(T *_feq, T _ux, T _uy, T _ip, T _iux, T _iuy) {
for (int q = 0; q < P<T>::nc; ++q) {
T dx = P<T>::cx[q] - _ux;
T dy = P<T>::cy[q] - _uy;
_feq[q] = _ip + 3.0*(_iux*dx + _iuy*dy);
}
}
// Function of getting equilibrium of ANS for 3D
// Fills _feq[0..nc) with the adjoint equilibrium built from the adjoint
// moments (ip, iux, iuy, iuz) and the primal velocity (ux, uy, uz).
template<class T, template<class>class P>
void Equilibrium(T *_feq, T _ux, T _uy, T _uz, T _ip, T _iux, T _iuy, T _iuz) {
for (int q = 0; q < P<T>::nc; ++q) {
T dx = P<T>::cx[q] - _ux;
T dy = P<T>::cy[q] - _uy;
T dz = P<T>::cz[q] - _uz;
_feq[q] = _ip + 3.0*(_iux*dx + _iuy*dy + _iuz*dz);
}
}
// Function of applying external force with Brinkman model of ANS for 2D
// Adds the Brinkman friction source term, scaled by alpha/(rho + alpha),
// in place to the distributions f0/f at site _idx.
template<class T, template<class>class P>
void ExternalForceBrinkman(T _rho, T _ux, T _uy, T _imx, T _imy, T *_f0, T *_f, T _alpha, int _idx) {
T k = 3.0*_alpha/(_rho + _alpha);
// rest population: "-= -x" in the original is written as "+= x" here
_f0[_idx] += k*(_ux*_imx + _uy*_imy);
for (int q = 1; q < P<T>::nc; ++q) {
_f[P<T>::IndexF(_idx, q)] -= k*((P<T>::cx[q] - _ux)*_imx + (P<T>::cy[q] - _uy)*_imy);
}
}
// Function of applying external force with Brinkman model of ANS for 3D
// Adds the Brinkman friction source term, scaled by alpha/(rho + alpha),
// in place to the distributions f0/f at site _idx.
template<class T, template<class>class P>
void ExternalForceBrinkman(T _rho, T _ux, T _uy, T _uz, T _imx, T _imy, T _imz, T *_f0, T *_f, T _alpha, int _idx) {
T k = 3.0*_alpha/(_rho + _alpha);
// rest population: "-= -x" in the original is written as "+= x" here
_f0[_idx] += k*(_ux*_imx + _uy*_imy + _uz*_imz);
for (int q = 1; q < P<T>::nc; ++q) {
_f[P<T>::IndexF(_idx, q)] -= k*((P<T>::cx[q] - _ux)*_imx + (P<T>::cy[q] - _uy)*_imy + (P<T>::cz[q] - _uz)*_imz);
}
}
// Function of Update macro, External force(Brinkman model) and Collide of ANS for 2D
// One adjoint time step per lattice site: compute adjoint macros, apply the
// Brinkman source term (which modifies f0/f in place), recompute the macros
// from the force-adjusted distributions, optionally store them, then relax
// toward the adjoint equilibrium with BGK rate omega = 1/(3*viscosity + 0.5).
// NOTE: the second Macro call is deliberate — ExternalForceBrinkman has
// changed f0/f, so the moments must be refreshed before collision.
template<class T, template<class>class P>
void MacroBrinkmanCollide(
P<T>& _p, const T *_rho, const T *_ux, const T *_uy,
T *_ip, T *_iux, T *_iuy, T *_imx, T *_imy,
T _viscosity, const T *_alpha, bool _issave = false
) {
T omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<T>::nc];
#pragma omp parallel for private(feq)
for (int idx = 0; idx < _p.nxyz; ++idx) {
// Update macro
T ip, iux, iuy, imx, imy;
Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
// External force with Brinkman model
ExternalForceBrinkman<T, P>(_rho[idx], _ux[idx], _uy[idx], imx, imy, _p.f0, _p.f, _alpha[idx], idx);
// Recompute the macros from the force-adjusted distributions
Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
// Save macro if need
if (_issave) {
_ip[idx] = ip;
_iux[idx] = iux;
_iuy[idx] = iuy;
_imx[idx] = imx;
_imy[idx] = imy;
}
// Collide (BGK relaxation toward the adjoint equilibrium)
Equilibrium<T, P>(feq, _ux[idx], _uy[idx], ip, iux, iuy);
_p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
for (int c = 1; c < P<T>::nc; ++c) {
int idxf = P<T>::IndexF(idx, c);
_p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
}
}
}
// Function of Update macro, External force(Brinkman model) and Collide of ANS for 3D
// One adjoint time step per lattice site: compute adjoint macros, apply the
// Brinkman source term (which modifies f0/f in place), recompute the macros
// from the force-adjusted distributions, optionally store them, then relax
// toward the adjoint equilibrium with BGK rate omega = 1/(3*viscosity + 0.5).
// NOTE: the second Macro call is deliberate — ExternalForceBrinkman has
// changed f0/f, so the moments must be refreshed before collision.
template<class T, template<class>class P>
void MacroBrinkmanCollide(
P<T>& _p, const T *_rho, const T *_ux, const T *_uy, const T *_uz,
T *_ip, T *_iux, T *_iuy, T *_iuz, T *_imx, T *_imy, T *_imz,
T _viscosity, const T *_alpha, bool _issave = false
) {
T omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<T>::nc];
#pragma omp parallel for private(feq)
for (int idx = 0; idx < _p.nxyz; ++idx) {
// Update macro
T ip, iux, iuy, iuz, imx, imy, imz;
Macro<T, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
// External force with Brinkman model
ExternalForceBrinkman<T, P>(_rho[idx], _ux[idx], _uy[idx], _uz[idx], imx, imy, imz, _p.f0, _p.f, _alpha[idx], idx);
// Recompute the macros from the force-adjusted distributions
Macro<T, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
// Save macro if need
if (_issave) {
_ip[idx] = ip;
_iux[idx] = iux;
_iuy[idx] = iuy;
_iuz[idx] = iuz;
_imx[idx] = imx;
_imy[idx] = imy;
_imz[idx] = imz;
}
// Collide and stream
Equilibrium<T, P>(feq, _ux[idx], _uy[idx], _uz[idx], ip, iux, iuy, iuz);
_p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
for (int c = 1; c < P<T>::nc; ++c) {
int idxf = P<T>::IndexF(idx, c);
_p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
}
}
}
// Function of setting initial condition of ANS for 2D
// Initializes every site's distributions to the adjoint equilibrium built
// from the prescribed macroscopic fields.
template<class T, template<class>class P>
void InitialCondition(P<T>& _p, const T *_ux, const T *_uy, const T *_ip, const T *_iux, const T *_iuy) {
T feq[P<T>::nc];
#pragma omp parallel for private(feq)
for (int n = 0; n < _p.nxyz; ++n) {
Equilibrium<T, P>(feq, _ux[n], _uy[n], _ip[n], _iux[n], _iuy[n]);
_p.f0[n] = feq[0];
for (int q = 1; q < P<T>::nc; ++q) {
_p.f[P<T>::IndexF(n, q)] = feq[q];
}
}
}
// Function of setting initial condition of ANS for 3D
// Initializes every site's distributions to the adjoint equilibrium built
// from the prescribed macroscopic fields.
template<class T, template<class>class P>
void InitialCondition(P<T>& _p, const T *_ux, const T *_uy, const T *_uz, const T *_ip, const T *_iux, const T *_iuy, const T *_iuz) {
T feq[P<T>::nc];
#pragma omp parallel for private(feq)
for (int n = 0; n < _p.nxyz; ++n) {
Equilibrium<T, P>(feq, _ux[n], _uy[n], _uz[n], _ip[n], _iux[n], _iuy[n], _iuz[n]);
_p.f0[n] = feq[0];
for (int q = 1; q < P<T>::nc; ++q) {
_p.f[P<T>::IndexF(n, q)] = feq[q];
}
}
}
// Function of setting boundary condition of ANS set iU for D2Q9
// Imposes the adjoint velocity boundary condition on each of the four edges
// (only on the MPI ranks owning that edge, per PEx/PEy). rho0 is the
// correction built from the outgoing populations and the prescribed boundary
// velocity (_uxbc, _uybc); _eps is an optional source term.
// FIX: on the ymin/ymax faces the wall-normal component is uy, so the
// normal-velocity factor and the denominator must use _uybc. The original
// used _uxbc throughout those two faces (copy-paste slip) — compare the
// D3Q15 overload below, whose ymin/ymax correctly use _uybc.
template<class T, template<class>class P, class Fv0, class Fv1, class Ff>
void iBoundaryConditionSetU(P<T>& _p, Fv0 _uxbc, Fv1 _uybc, Ff _bctype, T _eps = T()) {
// On xmin (normal component: ux; tangential: uy)
if (_p.PEx == 0) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(0 + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(0, j);
T rho0 = (-2.0*_eps + _uxbc(0 + _p.offsetx, j + _p.offsety)*(4.0*_p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 8)]) + 3.0*_uybc(0 + _p.offsetx, j + _p.offsety)*(_p.f[P<T>::IndexF(idx, 5)] - _p.f[P<T>::IndexF(idx, 8)]))/(3.0*(1.0 - _uxbc(0 + _p.offsetx, j + _p.offsety)));
_p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 1)] + rho0;
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] + rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] + rho0;
}
}
}
// On xmax (normal component: ux; tangential: uy)
if (_p.PEx == _p.mx - 1) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(_p.nx - 1, j);
T rho0 = (-2.0*_eps - _uxbc((_p.nx - 1) + _p.offsetx, j + _p.offsety)*(4.0*_p.f[P<T>::IndexF(idx, 3)] + _p.f[P<T>::IndexF(idx, 6)] + _p.f[P<T>::IndexF(idx, 7)]) + 3.0*_uybc((_p.nx - 1) + _p.offsetx, j + _p.offsety)*(_p.f[P<T>::IndexF(idx, 6)] - _p.f[P<T>::IndexF(idx, 7)]))/(3.0*(1.0 + _uxbc((_p.nx - 1) + _p.offsetx, j + _p.offsety)));
_p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 3)] + rho0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] + rho0;
}
}
}
// On ymin (normal component: uy; tangential: ux) — uses _uybc for the
// normal terms and denominator (bug fix; the original used _uxbc)
if (_p.PEy == 0) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, 0 + _p.offsety)) {
int idx = _p.Index(i, 0);
T rho0 = (-2.0*_eps + _uybc(i + _p.offsetx, 0 + _p.offsety)*(4.0*_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 6)]) + 3.0*_uxbc(i + _p.offsetx, 0 + _p.offsety)*(_p.f[P<T>::IndexF(idx, 5)] - _p.f[P<T>::IndexF(idx, 6)]))/(3.0*(1.0 - _uybc(i + _p.offsetx, 0 + _p.offsety)));
_p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 2)] + rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] + rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] + rho0;
}
}
}
// On ymax (normal component: uy; tangential: ux) — uses _uybc for the
// normal terms and denominator (bug fix; the original used _uxbc)
if (_p.PEy == _p.my - 1) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety)) {
int idx = _p.Index(i, _p.ny - 1);
T rho0 = (-2.0*_eps - _uybc(i + _p.offsetx, (_p.ny - 1) + _p.offsety)*(4.0*_p.f[P<T>::IndexF(idx, 4)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)]) + 3.0*_uxbc(i + _p.offsetx, (_p.ny - 1) + _p.offsety)*(_p.f[P<T>::IndexF(idx, 8)] - _p.f[P<T>::IndexF(idx, 7)]))/(3.0*(1.0 + _uybc(i + _p.offsetx, (_p.ny - 1) + _p.offsety)));
_p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 4)] + rho0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + rho0;
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] + rho0;
}
}
}
}
// Function of setting boundary condition of ANS set iU for D3Q15
// Imposes the adjoint velocity boundary condition on each of the six faces
// (only on the MPI ranks owning that face, per PEx/PEy/PEz). For every face,
// rho0 combines the outgoing populations with the prescribed boundary
// velocity: the wall-normal component scales the (8*f_normal + diagonals)
// group and appears in the denominator, the two tangential components scale
// signed diagonal differences, and _eps is an optional source term. The
// incoming populations are then reconstructed from their opposites plus rho0.
// NOTE(review): population index tables are specific to the project's D3Q15
// velocity ordering — verify against the P (lattice) class before editing.
template<class T, template<class>class P, class Fv0, class Fv1, class Fv2, class Ff>
void iBoundaryConditionSetU(P<T>& _p, Fv0 _uxbc, Fv1 _uybc, Fv2 _uzbc, Ff _bctype, T _eps = T()) {
// On xmin (normal: ux)
if (_p.PEx == 0) {
for (int j = 0; j < _p.ny; ++j) {
for (int k = 0; k < _p.nz; ++k) {
if (_bctype(0 + _p.offsetx, j + _p.offsety, k + _p.offsetz)) {
int idx = _p.Index(0, j, k);
T rho0 = (-4.0*_eps + _uxbc(0 + _p.offsetx, j + _p.offsety, k + _p.offsetz)*(8.0*_p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 10)] + _p.f[P<T>::IndexF(idx, 12)])
+ 3.0*_uybc(0 + _p.offsetx, j + _p.offsety, k + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 7)] - _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 10)] - _p.f[P<T>::IndexF(idx, 12)])
+ 3.0*_uzbc(0 + _p.offsetx, j + _p.offsety, k + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 9)] - _p.f[P<T>::IndexF(idx, 10)] - _p.f[P<T>::IndexF(idx, 12)])
)/(6.0*(1.0 - _uxbc(0 + _p.offsetx, j + _p.offsety, k + _p.offsetz)));
_p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 1)] + rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 12)] + rho0;
_p.f[P<T>::IndexF(idx, 11)] = _p.f[P<T>::IndexF(idx, 7)] + rho0;
_p.f[P<T>::IndexF(idx, 13)] = _p.f[P<T>::IndexF(idx, 9)] + rho0;
_p.f[P<T>::IndexF(idx, 14)] = _p.f[P<T>::IndexF(idx, 10)] + rho0;
}
}
}
}
// On xmax (normal: ux)
if (_p.PEx == _p.mx - 1) {
for (int j = 0; j < _p.ny; ++j) {
for (int k = 0; k < _p.nz; ++k) {
if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety, k + _p.offsetz)) {
int idx = _p.Index(_p.nx - 1, j, k);
T rho0 = (-4.0*_eps - _uxbc((_p.nx - 1) + _p.offsetx, j + _p.offsety, k + _p.offsetz)*(8.0*_p.f[P<T>::IndexF(idx, 4)] + _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 13)] + _p.f[P<T>::IndexF(idx, 14)])
+ 3.0*_uybc((_p.nx - 1) + _p.offsetx, j + _p.offsety, k + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 8)] - _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 13)] - _p.f[P<T>::IndexF(idx, 14)])
+ 3.0*_uzbc((_p.nx - 1) + _p.offsetx, j + _p.offsety, k + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 8)] - _p.f[P<T>::IndexF(idx, 11)] - _p.f[P<T>::IndexF(idx, 13)] + _p.f[P<T>::IndexF(idx, 14)])
)/(6.0*(1.0 + _uxbc((_p.nx - 1) + _p.offsetx, j + _p.offsety, k + _p.offsetz)));
_p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 4)] + rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 11)] + rho0;
_p.f[P<T>::IndexF(idx, 9)] = _p.f[P<T>::IndexF(idx, 13)] + rho0;
_p.f[P<T>::IndexF(idx, 10)] = _p.f[P<T>::IndexF(idx, 14)] + rho0;
_p.f[P<T>::IndexF(idx, 12)] = _p.f[P<T>::IndexF(idx, 8)] + rho0;
}
}
}
}
// On ymin (normal: uy)
if (_p.PEy == 0) {
for (int k = 0; k < _p.nz; ++k) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, 0 + _p.offsety, k + _p.offsetz)) {
int idx = _p.Index(i, 0, k);
T rho0 = (-4.0*_eps + 3.0*_uxbc(i + _p.offsetx, 0 + _p.offsety, k + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 7)] - _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 10)] - _p.f[P<T>::IndexF(idx, 13)])
+ _uybc(i + _p.offsetx, 0 + _p.offsety, k + _p.offsetz)*(8.0*_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 10)] + _p.f[P<T>::IndexF(idx, 13)])
+ 3.0*_uzbc(i + _p.offsetx, 0 + _p.offsety, k + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)] - _p.f[P<T>::IndexF(idx, 10)] - _p.f[P<T>::IndexF(idx, 13)])
)/(6.0*(1.0 - _uybc(i + _p.offsetx, 0 + _p.offsety, k + _p.offsetz)));
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 2)] + rho0;
_p.f[P<T>::IndexF(idx, 9)] = _p.f[P<T>::IndexF(idx, 13)] + rho0;
_p.f[P<T>::IndexF(idx, 11)] = _p.f[P<T>::IndexF(idx, 7)] + rho0;
_p.f[P<T>::IndexF(idx, 12)] = _p.f[P<T>::IndexF(idx, 8)] + rho0;
_p.f[P<T>::IndexF(idx, 14)] = _p.f[P<T>::IndexF(idx, 10)] + rho0;
}
}
}
}
// On ymax (normal: uy)
if (_p.PEy == _p.my - 1) {
for (int k = 0; k < _p.nz; ++k) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety, k + _p.offsetz)) {
int idx = _p.Index(i, _p.ny - 1, k);
T rho0 = (-4.0*_eps + 3.0*_uxbc(i + _p.offsetx, (_p.ny - 1) + _p.offsety, k + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 9)] - _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 12)] - _p.f[P<T>::IndexF(idx, 14)])
- _uybc(i + _p.offsetx, (_p.ny - 1) + _p.offsety, k + _p.offsetz)*(8.0*_p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 12)] + _p.f[P<T>::IndexF(idx, 14)])
+ 3.0*_uzbc(i + _p.offsetx, (_p.ny - 1) + _p.offsety, k + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 9)] - _p.f[P<T>::IndexF(idx, 11)] - _p.f[P<T>::IndexF(idx, 12)] + _p.f[P<T>::IndexF(idx, 14)])
)/(6.0*(1.0 + _uybc(i + _p.offsetx, (_p.ny - 1) + _p.offsety, k + _p.offsetz)));
_p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 5)] + rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 11)] + rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 12)] + rho0;
_p.f[P<T>::IndexF(idx, 10)] = _p.f[P<T>::IndexF(idx, 14)] + rho0;
_p.f[P<T>::IndexF(idx, 13)] = _p.f[P<T>::IndexF(idx, 9)] + rho0;
}
}
}
}
// On zmin (normal: uz)
if (_p.PEz == 0) {
for (int i = 0; i < _p.nx; ++i) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(i + _p.offsetx, j + _p.offsety, 0 + _p.offsetz)) {
int idx = _p.Index(i, j, 0);
T rho0 = (-4.0*_eps + 3.0*_uxbc(i + _p.offsetx, j + _p.offsety, 0 + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 7)] - _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 9)] - _p.f[P<T>::IndexF(idx, 14)])
+ 3.0*_uybc(i + _p.offsetx, j + _p.offsety, 0 + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)] - _p.f[P<T>::IndexF(idx, 9)] - _p.f[P<T>::IndexF(idx, 14)])
+ _uzbc(i + _p.offsetx, j + _p.offsety, 0 + _p.offsetz)*(8.0*_p.f[P<T>::IndexF(idx, 3)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 14)])
)/(6.0*(1.0 - _uzbc(i + _p.offsetx, j + _p.offsety, 0 + _p.offsetz)));
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 3)] + rho0;
_p.f[P<T>::IndexF(idx, 10)] = _p.f[P<T>::IndexF(idx, 14)] + rho0;
_p.f[P<T>::IndexF(idx, 11)] = _p.f[P<T>::IndexF(idx, 7)] + rho0;
_p.f[P<T>::IndexF(idx, 12)] = _p.f[P<T>::IndexF(idx, 8)] + rho0;
_p.f[P<T>::IndexF(idx, 13)] = _p.f[P<T>::IndexF(idx, 9)] + rho0;
}
}
}
}
// On zmax (normal: uz)
if (_p.PEz == _p.mz - 1) {
for (int i = 0; i < _p.nx; ++i) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(i + _p.offsetx, j + _p.offsety, (_p.nz - 1) + _p.offsetz)) {
int idx = _p.Index(i, j, _p.nz - 1);
T rho0 = (-4.0*_eps + 3.0*_uxbc(i + _p.offsetx, j + _p.offsety, (_p.nz - 1) + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 10)] - _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 12)] - _p.f[P<T>::IndexF(idx, 13)])
+ 3.0*_uybc(i + _p.offsetx, j + _p.offsety, (_p.nz - 1) + _p.offsetz)*(_p.f[P<T>::IndexF(idx, 10)] - _p.f[P<T>::IndexF(idx, 11)] - _p.f[P<T>::IndexF(idx, 12)] + _p.f[P<T>::IndexF(idx, 13)])
- _uzbc(i + _p.offsetx, j + _p.offsety, (_p.nz - 1) + _p.offsetz)*(8.0*_p.f[P<T>::IndexF(idx, 6)] + _p.f[P<T>::IndexF(idx, 10)] + _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 12)] + _p.f[P<T>::IndexF(idx, 13)])
)/(6.0*(1.0 + _uzbc(i + _p.offsetx, j + _p.offsety, (_p.nz - 1) + _p.offsetz)));
_p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 6)] + rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 11)] + rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 12)] + rho0;
_p.f[P<T>::IndexF(idx, 9)] = _p.f[P<T>::IndexF(idx, 13)] + rho0;
_p.f[P<T>::IndexF(idx, 14)] = _p.f[P<T>::IndexF(idx, 10)] + rho0;
}
}
}
}
}
// Function of setting boundary condition of ANS set iRho for D2Q9
// Imposes the adjoint density boundary condition on each of the four edges
// (only on the MPI ranks owning that edge). rho0 is built from the outgoing
// populations (weighted 4:1:1) and subtracted when reconstructing the
// incoming populations from their opposites.
template<class T, template<class>class P, class Ff>
void iBoundaryConditionSetRho2D(P<T>& _p, Ff _bctype) {
// On xmin
if (_p.PEx == 0) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(0 + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(0, j);
T rho0 = (4.0*_p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 8)])/3.0;
_p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 1)] - rho0;
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - rho0;
}
}
}
// On xmax
if (_p.PEx == _p.mx - 1) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(_p.nx - 1, j);
T rho0 = (4.0*_p.f[P<T>::IndexF(idx, 3)] + _p.f[P<T>::IndexF(idx, 6)] + _p.f[P<T>::IndexF(idx, 7)])/3.0;
_p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 3)] - rho0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] - rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - rho0;
}
}
}
// On ymin
if (_p.PEy == 0) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, 0 + _p.offsety)) {
int idx = _p.Index(i, 0);
T rho0 = (4.0*_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 6)])/3.0;
_p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 2)] - rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - rho0;
}
}
}
// On ymax
if (_p.PEy == _p.my - 1) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety)) {
int idx = _p.Index(i, _p.ny - 1);
T rho0 = (4.0*_p.f[P<T>::IndexF(idx, 4)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)])/3.0;
_p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 4)] - rho0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] - rho0;
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - rho0;
}
}
}
}
// Function of setting boundary condition of ANS set iRho for D3Q15
// Imposes the adjoint density boundary condition on each of the six faces
// (only on the MPI ranks owning that face). rho0 is built from the outgoing
// populations (weighted 8:1:1:1:1) and subtracted when reconstructing the
// incoming populations from their opposites.
// NOTE(review): population index tables follow the project's D3Q15 velocity
// ordering — verify against the P (lattice) class before editing.
template<class T, template<class>class P, class Ff>
void iBoundaryConditionSetRho3D(P<T>& _p, Ff _bctype) {
// On xmin
if (_p.PEx == 0) {
for (int j = 0; j < _p.ny; ++j) {
for (int k = 0; k < _p.nz; ++k) {
if (_bctype(0 + _p.offsetx, j + _p.offsety, k + _p.offsetz)) {
int idx = _p.Index(0, j, k);
T rho0 = (8.0*_p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 10)] + _p.f[P<T>::IndexF(idx, 12)])/6.0;
_p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 1)] - rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 12)] - rho0;
_p.f[P<T>::IndexF(idx, 11)] = _p.f[P<T>::IndexF(idx, 7)] - rho0;
_p.f[P<T>::IndexF(idx, 13)] = _p.f[P<T>::IndexF(idx, 9)] - rho0;
_p.f[P<T>::IndexF(idx, 14)] = _p.f[P<T>::IndexF(idx, 10)] - rho0;
}
}
}
}
// On xmax
if (_p.PEx == _p.mx - 1) {
for (int j = 0; j < _p.ny; ++j) {
for (int k = 0; k < _p.nz; ++k) {
if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety, k + _p.offsetz)) {
int idx = _p.Index(_p.nx - 1, j, k);
T rho0 = (8.0*_p.f[P<T>::IndexF(idx, 4)] + _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 13)] + _p.f[P<T>::IndexF(idx, 14)])/6.0;
_p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 4)] - rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 11)] - rho0;
_p.f[P<T>::IndexF(idx, 9)] = _p.f[P<T>::IndexF(idx, 13)] - rho0;
_p.f[P<T>::IndexF(idx, 10)] = _p.f[P<T>::IndexF(idx, 14)] - rho0;
_p.f[P<T>::IndexF(idx, 12)] = _p.f[P<T>::IndexF(idx, 8)] - rho0;
}
}
}
}
// On ymin
if (_p.PEy == 0) {
for (int k = 0; k < _p.nz; ++k) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, 0 + _p.offsety, k + _p.offsetz)) {
int idx = _p.Index(i, 0, k);
T rho0 = (8.0*_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 10)] + _p.f[P<T>::IndexF(idx, 13)])/6.0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 2)] - rho0;
_p.f[P<T>::IndexF(idx, 9)] = _p.f[P<T>::IndexF(idx, 13)] - rho0;
_p.f[P<T>::IndexF(idx, 11)] = _p.f[P<T>::IndexF(idx, 7)] - rho0;
_p.f[P<T>::IndexF(idx, 12)] = _p.f[P<T>::IndexF(idx, 8)] - rho0;
_p.f[P<T>::IndexF(idx, 14)] = _p.f[P<T>::IndexF(idx, 10)] - rho0;
}
}
}
}
// On ymax
if (_p.PEy == _p.my - 1) {
for (int k = 0; k < _p.nz; ++k) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety, k + _p.offsetz)) {
int idx = _p.Index(i, _p.ny - 1, k);
T rho0 = (8.0*_p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 12)] + _p.f[P<T>::IndexF(idx, 14)])/6.0;
_p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 5)] - rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 11)] - rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 12)] - rho0;
_p.f[P<T>::IndexF(idx, 10)] = _p.f[P<T>::IndexF(idx, 14)] - rho0;
_p.f[P<T>::IndexF(idx, 13)] = _p.f[P<T>::IndexF(idx, 9)] - rho0;
}
}
}
}
// On zmin
if (_p.PEz == 0) {
for (int i = 0; i < _p.nx; ++i) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(i + _p.offsetx, j + _p.offsety, 0 + _p.offsetz)) {
int idx = _p.Index(i, j, 0);
T rho0 = (8.0*_p.f[P<T>::IndexF(idx, 3)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 14)])/6.0;
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 3)] - rho0;
_p.f[P<T>::IndexF(idx, 10)] = _p.f[P<T>::IndexF(idx, 14)] - rho0;
_p.f[P<T>::IndexF(idx, 11)] = _p.f[P<T>::IndexF(idx, 7)] - rho0;
_p.f[P<T>::IndexF(idx, 12)] = _p.f[P<T>::IndexF(idx, 8)] - rho0;
_p.f[P<T>::IndexF(idx, 13)] = _p.f[P<T>::IndexF(idx, 9)] - rho0;
}
}
}
}
// On zmax
if (_p.PEz == _p.mz - 1) {
for (int i = 0; i < _p.nx; ++i) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(i + _p.offsetx, j + _p.offsety, (_p.nz - 1) + _p.offsetz)) {
int idx = _p.Index(i, j, _p.nz - 1);
T rho0 = (8.0*_p.f[P<T>::IndexF(idx, 6)] + _p.f[P<T>::IndexF(idx, 10)] + _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 12)] + _p.f[P<T>::IndexF(idx, 13)])/6.0;
_p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 6)] - rho0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 11)] - rho0;
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 12)] - rho0;
_p.f[P<T>::IndexF(idx, 9)] = _p.f[P<T>::IndexF(idx, 13)] - rho0;
_p.f[P<T>::IndexF(idx, 14)] = _p.f[P<T>::IndexF(idx, 10)] - rho0;
}
}
}
}
}
}
} |
parallel.c | #include <graph.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#define NUMBER_OF_THREADS 6
void mp_implementation(int **dist);
/* Entry point: build the distance matrix, run the OpenMP Floyd-Warshall
 * implementation, then release all resources. */
int main() {
int **dist = create_array(NUMBER_OF_NODES);
#ifndef WRITE_DISTANCES_TO_FILE
printf("%s%d\n", "Number of nodes: ", NUMBER_OF_NODES);
#endif
mp_implementation(dist);
free_resources(dist);
return EXIT_SUCCESS;
}
/**
    This function implements Floyd-Warshall algorithm
    using the OpenMP API.

    Correctness note: the k loop of Floyd-Warshall carries a dependency
    (iteration k reads distances produced by iterations < k), so it must run
    sequentially. The previous version both partitioned k across threads and
    nested a `parallel for` inside an already-parallel region, which created
    nested parallel teams and could yield wrong shortest paths. Parallelism
    is now applied to the independent row loop i inside each k step.
**/
void mp_implementation(int **dist) {
double time_start, time_end;
fill_array(dist);
show_distances(dist);
int step = NUMBER_OF_NODES/NUMBER_OF_THREADS;
int i, j, k;
if (step < 1) {
/** kept for backward compatibility: the program aborts when the number of
    threads is greater than the number of nodes **/
abort_with_error(STEP_LESS_THAN_ONE);
}
omp_set_num_threads(NUMBER_OF_THREADS); // set the number of threads
time_start = omp_get_wtime(); // start measuring time
// k advances sequentially; each k step is a fully parallel sweep over rows
for (k = 0; k < NUMBER_OF_NODES; k++) {
#pragma omp parallel for private(j)
for (i = 0; i < NUMBER_OF_NODES; i++) {
for (j = 0; j < NUMBER_OF_NODES; j++) {
if (i == j) continue;
dist[i][j] = MIN(dist[i][j], dist[i][k] + dist[k][j]);
}
}
}
time_end = omp_get_wtime(); // end measuring time
double elapsed_time = time_end - time_start;
show_distances(dist);
#ifndef WRITE_DISTANCES_TO_FILE
printf("OpenMP: total elapsed time: %.6f sec. Number of threads: %d.\n", elapsed_time, NUMBER_OF_THREADS);
#endif
}
|
GB_binop__max_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int8)
// A*D function (colscale): GB (_AxD__max_int8)
// D*A function (rowscale): GB (_DxB__max_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__max_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__max_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int8)
// C=scalar+B GB (_bind1st__max_int8)
// C=scalar+B' GB (_bind1st_tran__max_int8)
// C=A+scalar GB (_bind2nd__max_int8)
// C=A'+scalar GB (_bind2nd_tran__max_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT8 || GxB_NO_MAX_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the MAX op and int8 types are
// supplied to the included template via the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulation); the MAX op and
// int8 types are supplied to the included template via the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C with the
// MAX op. Returns GrB_NO_VALUE when this kernel is disabled at compile time
// (GB_DISABLE), so the caller falls back to the generic implementation.
GrB_Info GB (_Cdense_accumB__max_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C with the MAX op.
// Returns GrB_NO_VALUE when this kernel is disabled at compile time.
// (The second `return (GrB_SUCCESS)` is unreachable auto-generated code.)
GrB_Info GB (_Cdense_accumb__max_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, combining
// entries with the MAX op. Returns GrB_NO_VALUE when disabled.
GrB_Info GB (_AxD__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, combining entries
// with the MAX op. Returns GrB_NO_VALUE when disabled.
GrB_Info GB (_DxB__max_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M or !M) with the MAX op.
// When is_eWiseUnion is true, the alpha/beta scalars substitute for entries
// missing from A or B respectively. Workspace declared via GB_WERK_DECLARE
// is released by GB_FREE_WORKSPACE after the template runs.
GrB_Info GB (_AaddB__max_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// eWiseUnion: unpack the user-provided fill-in scalars
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (optionally masked) with the MAX op,
// where C is sparse or hypersparse. Returns GrB_NO_VALUE when disabled.
GrB_Info GB (_AemultB_08__max_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Element-wise multiply where A drives the iteration (sparse/hyper) and B is
// accessed randomly (bitmap/full).  GB_BINOP_FLIP selects at compile time
// whether a flipped variant of the operator is needed; MAX is commutative, so
// for this file the flipxy argument is expected to be handled by the
// GB_BINOP_FLIP == 0 branch.
GrB_Info GB (_AemultB_02__max_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Element-wise multiply where the sparse/hyper mask M drives the iteration and
// both A and B are accessed randomly (bitmap/full).
GrB_Info GB (_AemultB_04__max_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Element-wise multiply producing a bitmap C; the ewise_method selects the
// sub-case inside GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__max_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [k] = max (x, Bx [k]) for every entry of B, with the scalar x
// bound to the first operand.  Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__max_int8)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t x = (*((int8_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
    // operate only on entries present in the bitmap
    if (GBB (Bb, k))
    {
        int8_t bk = GBX (Bx, k, false) ;
        Cx [k] = GB_IMAX (x, bk) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [k] = max (Ax [k], y) for every entry of A, with the scalar y
// bound to the second operand.  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__max_int8)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // operate only on entries present in the bitmap
    if (GBB (Ab, k))
    {
        int8_t ak = GBX (Ax, k, false) ;
        Cx [k] = GB_IMAX (ak, y) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}

// Transposes A while applying z = max (x, aij), with the scalar bound to the
// first operand.  The transpose machinery is in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__max_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (auto-generated; for this
// operator both definitions happen to be int8_t)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}

// Transposes A while applying z = max (aij, y), with the scalar bound to the
// second operand.  The transpose machinery is in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residualbased_newton_raphson_strategy.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY
// System includes
#include <utility> // std::move

// External includes

// Project includes
#include "includes/define.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/builtin_timer.h"

//default builder and solver
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedNewtonRaphsonStrategy
* @ingroup KratosCore
* @brief This is the base Newton Raphson strategy
* @details This strategy iterates until the convergence is achieved (or the maximum number of iterations is surpassed) using a Newton Raphson algorithm
* @author Riccardo Rossi
*/
template <class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedNewtonRaphsonStrategy
: public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
// Counted pointer of ClassName
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedNewtonRaphsonStrategy);
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
///@}
///@name Life Cycle
///@{
/**
 * @brief Default constructor (empty strategy; members must be configured later)
 */
explicit ResidualBasedNewtonRaphsonStrategy() : BaseType()
{
}
/**
 * @brief Constructor with model part only
 * @details Delegates to the Parameters constructor using the default settings.
 * @param rModelPart The model part of the problem
 */
explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart)
: ResidualBasedNewtonRaphsonStrategy(rModelPart, ResidualBasedNewtonRaphsonStrategy::GetDefaultParameters())
{
}
/**
 * @brief Constructor with model part and configuration parameters
 * @param rModelPart The model part of the problem
 * @param ThisParameters The configuration parameters
 */
explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart, Parameters ThisParameters)
: BaseType(rModelPart),
mSolutionStepIsInitialized(false),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
// Validate and assign defaults
// NOTE(review): AssignSettings presumably populates the scheme, convergence
// criteria and builder-and-solver from ThisParameters -- not visible in this
// chunk; confirm in the implementation.
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
// Getting builder and solver
auto p_builder_and_solver = GetBuilderAndSolver();
if (p_builder_and_solver != nullptr) {
// Tells to the builder and solver if the reactions have to be Calculated or not
p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
// be reshaped at each step or not
p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
} else {
KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "BuilderAndSolver is not initialized. Please assign one before settings flags" << std::endl;
}
// Allocate empty system containers; they are sized in InitializeSolutionStep
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
}
/**
 * @brief Constructor with scheme, linear solver and convergence criteria
 * @details A default ResidualBasedBlockBuilderAndSolver is created around the
 * given linear solver.
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param MoveMeshFlag The flag that allows to move the mesh
 */
explicit ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
int MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false)
: BaseType(rModelPart, MoveMeshFlag),
mpScheme(pScheme),
mpConvergenceCriteria(pNewConvergenceCriteria),
mReformDofSetAtEachStep(ReformDofSetAtEachStep),
mCalculateReactionsFlag(CalculateReactions),
mSolutionStepIsInitialized(false),
mMaxIterationNumber(MaxIterations),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
KRATOS_TRY;
// Setting up the default builder and solver
mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer(
new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver));
// Tells to the builder and solver if the reactions have to be Calculated or not
mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
// be reshaped at each step or not
mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
SetEchoLevel(1);
// By default the matrices are rebuilt at each iteration
this->SetRebuildLevel(2);
// Allocate empty system containers; they are sized in InitializeSolutionStep
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
KRATOS_CATCH("");
}
/**
 * @brief Constructor specifying the builder and solver
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param pNewBuilderAndSolver The builder and solver employed
 * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param MoveMeshFlag The flag that allows to move the mesh
 */
explicit ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
int MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false)
: BaseType(rModelPart, MoveMeshFlag),
mpScheme(pScheme),
mpBuilderAndSolver(pNewBuilderAndSolver),
mpConvergenceCriteria(pNewConvergenceCriteria),
mReformDofSetAtEachStep(ReformDofSetAtEachStep),
mCalculateReactionsFlag(CalculateReactions),
mSolutionStepIsInitialized(false),
mMaxIterationNumber(MaxIterations),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
KRATOS_TRY
// Getting builder and solver
auto p_builder_and_solver = GetBuilderAndSolver();
// Tells to the builder and solver if the reactions have to be Calculated or not
p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
//be reshaped at each step or not
p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
SetEchoLevel(1);
// By default the matrices are rebuilt at each iteration
this->SetRebuildLevel(2);
// Allocate empty system containers; they are sized in InitializeSolutionStep
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
KRATOS_CATCH("")
}
/**
 * @brief Deprecated constructor specifying both the linear solver and the builder and solver
 * @details Delegates to the constructor without linear solver and only checks
 * that the supplied linear solver matches the one held by the builder and solver.
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param pNewBuilderAndSolver The builder and solver employed
 * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param MoveMeshFlag The flag that allows to move the mesh
 */
KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver")
explicit ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
int MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag)
{
KRATOS_TRY
KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, please use the constructor without linear solver" << std::endl;
// Getting builder and solver
auto p_builder_and_solver = GetBuilderAndSolver();
// We check if the linear solver considered for the builder and solver is consistent
auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver();
KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl;
KRATOS_CATCH("")
}
/**
 * Constructor with Parameters
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param Settings Settings used in the strategy
 */
ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
Parameters Settings)
: BaseType(rModelPart),
mpScheme(pScheme),
mpConvergenceCriteria(pNewConvergenceCriteria),
mSolutionStepIsInitialized(false),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
KRATOS_TRY;
// NOTE(review): the Settings argument is not consumed in this constructor
// body, and mCalculateReactionsFlag / mReformDofSetAtEachStep /
// mMaxIterationNumber are read below without being set here -- presumably
// they carry in-class default values or Settings is applied elsewhere;
// confirm.
// Setting up the default builder and solver
mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer(
new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver));
// Tells to the builder and solver if the reactions have to be Calculated or not
mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
// be reshaped at each step or not
mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
SetEchoLevel(1);
// By default the matrices are rebuilt at each iteration
this->SetRebuildLevel(2);
// Allocate empty system containers; they are sized in InitializeSolutionStep
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
KRATOS_CATCH("");
}
/**
 * @brief Constructor specifying the builder and solver and using Parameters
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param pNewBuilderAndSolver The builder and solver employed
 * @param Settings Settings used in the strategy
 */
ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
Parameters Settings)
: BaseType(rModelPart),
mpScheme(pScheme),
mpBuilderAndSolver(pNewBuilderAndSolver),
mpConvergenceCriteria(pNewConvergenceCriteria),
mSolutionStepIsInitialized(false),
mInitializeWasPerformed(false),
mKeepSystemConstantDuringIterations(false)
{
KRATOS_TRY
// NOTE(review): the Settings argument is not consumed in this constructor
// body, and mCalculateReactionsFlag / mReformDofSetAtEachStep are read
// below without being set here -- presumably they carry in-class default
// values or Settings is applied elsewhere; confirm.
// Getting builder and solver
auto p_builder_and_solver = GetBuilderAndSolver();
// Tells to the builder and solver if the reactions have to be Calculated or not
p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
//be reshaped at each step or not
p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
SetEchoLevel(1);
// By default the matrices are rebuilt at each iteration
this->SetRebuildLevel(2);
// Allocate empty system containers; they are sized in InitializeSolutionStep
mpA = TSparseSpace::CreateEmptyMatrixPointer();
mpDx = TSparseSpace::CreateEmptyVectorPointer();
mpb = TSparseSpace::CreateEmptyVectorPointer();
KRATOS_CATCH("")
}
/**
 * @brief Deprecated Parameters constructor specifying both the linear solver and the builder and solver
 * @details Delegates to the Parameters constructor without linear solver and only checks
 * that the supplied linear solver matches the one held by the builder and solver.
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param pNewBuilderAndSolver The builder and solver employed
 * @param Settings Settings used in the strategy
 */
KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver")
ResidualBasedNewtonRaphsonStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
Parameters Settings)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, Settings)
{
KRATOS_TRY
KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, please use the constructor without linear solver" << std::endl;
// Getting builder and solver
auto p_builder_and_solver = GetBuilderAndSolver();
// We check if the linear solver considered for the builder and solver is consistent
auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver();
KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl;
KRATOS_CATCH("")
}
/**
 * @brief Destructor.
 * @details In trilinos third party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear().
 */
~ResidualBasedNewtonRaphsonStrategy() override
{
// If the linear solver has not been deallocated, clean it before
// deallocating mpA. This prevents a memory error with the the ML
// solver (which holds a reference to it).
// NOTE: The linear solver is hold by the B&S
auto p_builder_and_solver = this->GetBuilderAndSolver();
if (p_builder_and_solver != nullptr) {
p_builder_and_solver->Clear();
}
// Deallocating system vectors to avoid errors in MPI. Clear calls
// TrilinosSpace::Clear for the vectors, which preserves the Map of
// current vectors, performing MPI calls in the process. Due to the
// way Python garbage collection works, this may happen after
// MPI_Finalize has already been called and is an error. Resetting
// the pointers here prevents Clear from operating with the
// (now deallocated) vectors.
mpA.reset();
mpDx.reset();
mpb.reset();
// Final cleanup of the remaining internal state (scheme, flags, ...)
Clear();
}
/**
 * @brief Set method for the time scheme
 * @param pScheme The pointer to the time scheme considered
 */
void SetScheme(typename TSchemeType::Pointer pScheme)
{
// pScheme is a by-value sink: move it into the member to avoid an extra
// smart-pointer reference-count update
mpScheme = std::move(pScheme);
};
/**
 * @brief Get method for the time scheme
 * @return mpScheme: The pointer to the time scheme considered
 */
typename TSchemeType::Pointer GetScheme()
{
return mpScheme;
};
/**
 * @brief Set method for the builder and solver
 * @param pNewBuilderAndSolver The pointer to the builder and solver considered
 */
void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver)
{
// by-value sink: move instead of copy-assigning the smart pointer
mpBuilderAndSolver = std::move(pNewBuilderAndSolver);
};
/**
 * @brief Get method for the builder and solver
 * @return mpBuilderAndSolver: The pointer to the builder and solver considered
 */
typename TBuilderAndSolverType::Pointer GetBuilderAndSolver()
{
return mpBuilderAndSolver;
};
/**
 * @brief This method sets the flag mInitializeWasPerformed
 * @param InitializePerformedFlag The flag that tells if the initialize has been computed
 */
void SetInitializePerformedFlag(bool InitializePerformedFlag = true)
{
mInitializeWasPerformed = InitializePerformedFlag;
}
/**
 * @brief This method gets the flag mInitializeWasPerformed
 * @return mInitializeWasPerformed: The flag that tells if the initialize has been computed
 */
bool GetInitializePerformedFlag()
{
return mInitializeWasPerformed;
}
/**
 * @brief This method sets the flag mCalculateReactionsFlag
 * @param CalculateReactionsFlag The flag that tells if the reactions are computed
 */
void SetCalculateReactionsFlag(bool CalculateReactionsFlag)
{
// NOTE(review): unlike SetReformDofSetAtEachStepFlag, this does not forward
// the flag to the builder and solver -- confirm whether that is intended
mCalculateReactionsFlag = CalculateReactionsFlag;
}
/**
 * @brief This method returns the flag mCalculateReactionsFlag
 * @return The flag that tells if the reactions are computed
 */
bool GetCalculateReactionsFlag()
{
return mCalculateReactionsFlag;
}
/**
 * @brief This method sets the flag mUseOldStiffnessInFirstIteration
 * @param UseOldStiffnessInFirstIterationFlag If true, the first iteration reuses the
 * previous stiffness matrix (inferred from the name; consumed outside this chunk)
 */
void SetUseOldStiffnessInFirstIterationFlag(bool UseOldStiffnessInFirstIterationFlag)
{
mUseOldStiffnessInFirstIteration = UseOldStiffnessInFirstIterationFlag;
}
/**
 * @brief This method returns the flag mUseOldStiffnessInFirstIteration
 * @return The flag controlling the reuse of the old stiffness in the first iteration
 */
bool GetUseOldStiffnessInFirstIterationFlag()
{
return mUseOldStiffnessInFirstIteration;
}
/**
 * @brief This method sets the flag mReformDofSetAtEachStep
 * @details The flag is also forwarded to the builder and solver as its reshape-matrix flag.
 * @param Flag The flag that tells if each time step the system is rebuilt
 */
void SetReformDofSetAtEachStepFlag(bool Flag)
{
mReformDofSetAtEachStep = Flag;
GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
}
/**
 * @brief This method returns the flag mReformDofSetAtEachStep
 * @return The flag that tells if each time step the system is rebuilt
 */
bool GetReformDofSetAtEachStepFlag()
{
return mReformDofSetAtEachStep;
}
/**
 * @brief This method sets mMaxIterationNumber
 * @param MaxIterationNumber This is the maximum number of non-linear iterations
 */
void SetMaxIterationNumber(unsigned int MaxIterationNumber)
{
mMaxIterationNumber = MaxIterationNumber;
}
/**
 * @brief This method gets mMaxIterationNumber
 * @return mMaxIterationNumber: This is the maximum number of non-linear iterations
 */
unsigned int GetMaxIterationNumber()
{
return mMaxIterationNumber;
}
/**
 * @brief It sets the level of echo for the solving strategy
 * @details The level is also forwarded to the builder and solver.
 * @param Level The level to set
 * @details The different levels of echo are:
 * - 0: Mute... no echo at all
 * - 1: Printing time and basic informations
 * - 2: Printing linear solver data
 * - 3: Print of debug informations: Echo of stiffness matrix, Dx, b...
 */
void SetEchoLevel(int Level) override
{
BaseType::mEchoLevel = Level;
GetBuilderAndSolver()->SetEchoLevel(Level);
}
//*********************************************************************************
/**OPERATIONS ACCESSIBLE FROM THE INPUT: **/
/**
 * @brief Create method (factory): builds a new strategy of this class
 * @param rModelPart The model part of the problem
 * @param ThisParameters The configuration parameters
 * @return A shared pointer to the newly created strategy
 */
typename BaseType::Pointer Create(
ModelPart& rModelPart,
Parameters ThisParameters
) const override
{
return Kratos::make_shared<ClassType>(rModelPart, ThisParameters);
}
/**
 * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
 values of the solution step of interest are assumed equal to the old values
 */
void Predict() override
{
KRATOS_TRY
const DataCommunicator &r_comm = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator();
//OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions
//if the operations needed were already performed this does nothing
if (mInitializeWasPerformed == false)
Initialize();
//initialize solution step
if (mSolutionStepIsInitialized == false)
InitializeSolutionStep();
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet();
GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
// Applying constraints if needed
auto& r_constraints_array = BaseType::GetModelPart().MasterSlaveConstraints();
const int local_number_of_constraints = r_constraints_array.size();
// Global (cross-rank) reduction: constraints may exist only on some ranks
const int global_number_of_constraints = r_comm.SumAll(local_number_of_constraints);
if(global_number_of_constraints != 0) {
const auto& r_process_info = BaseType::GetModelPart().GetProcessInfo();
const auto it_const_begin = r_constraints_array.begin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
(it_const_begin + i)->ResetSlaveDofs(r_process_info);
#pragma omp parallel for
for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
(it_const_begin + i)->Apply(r_process_info);
// The following is needed since we need to eventually compute time derivatives after applying
// Master slave relations
// (Update with a zero Dx: no solution change, only derivative recomputation)
TSparseSpace::SetToZero(rDx);
this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
}
// Move the mesh if needed
if (this->MoveMeshFlag() == true)
BaseType::MoveMesh();
KRATOS_CATCH("")
}
/**
 * @brief Initialization of member variables and prior operations
 * @details Initializes the scheme, elements, conditions and convergence
 * criteria exactly once; subsequent calls are no-ops until Clear() resets
 * mInitializeWasPerformed.
 */
void Initialize() override
{
KRATOS_TRY;
if (mInitializeWasPerformed == false)
{
//pointers needed in the solution
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TConvergenceCriteriaType::Pointer p_convergence_criteria = mpConvergenceCriteria;
//Initialize The Scheme - OPERATIONS TO BE DONE ONCE
if (p_scheme->SchemeIsInitialized() == false)
p_scheme->Initialize(BaseType::GetModelPart());
//Initialize The Elements - OPERATIONS TO BE DONE ONCE
if (p_scheme->ElementsAreInitialized() == false)
p_scheme->InitializeElements(BaseType::GetModelPart());
//Initialize The Conditions - OPERATIONS TO BE DONE ONCE
if (p_scheme->ConditionsAreInitialized() == false)
p_scheme->InitializeConditions(BaseType::GetModelPart());
//initialisation of the convergence criteria
if (p_convergence_criteria->IsInitialized() == false)
p_convergence_criteria->Initialize(BaseType::GetModelPart());
mInitializeWasPerformed = true;
}
KRATOS_CATCH("");
}
/**
 * @brief Clears the internal storage
 * @details Resets the builder-and-solver DOF-set flag, releases the system
 * matrix/vectors and clears the scheme, so the next solve rebuilds everything.
 */
void Clear() override
{
KRATOS_TRY;
// Setting to zero the internal flag to ensure that the dof sets are recalculated. Also clear the linear solver stored in the B&S
auto p_builder_and_solver = GetBuilderAndSolver();
if (p_builder_and_solver != nullptr) {
p_builder_and_solver->SetDofSetIsInitializedFlag(false);
p_builder_and_solver->Clear();
}
// Clearing the system of equations
if (mpA != nullptr)
SparseSpaceType::Clear(mpA);
if (mpDx != nullptr)
SparseSpaceType::Clear(mpDx);
if (mpb != nullptr)
SparseSpaceType::Clear(mpb);
// Clearing scheme: use the null-checked local pointer (the original called
// GetScheme() again after checking p_scheme, bypassing the guard and
// diverging from the builder-and-solver handling above)
auto p_scheme = GetScheme();
if (p_scheme != nullptr) {
p_scheme->Clear();
}
mInitializeWasPerformed = false;
mSolutionStepIsInitialized = false;
KRATOS_CATCH("");
}
/**
 * @brief This should be considered as a "post solution" convergence check which is useful for coupled analysis - the convergence criteria used is the one used inside the "solve" step
 * @return true if the post-criteria of the convergence criteria is satisfied
 */
bool IsConverged() override
{
KRATOS_TRY;
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
// if the criteria needs the actual residual, rebuild the RHS before checking
if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
{
TSparseSpace::SetToZero(rb);
GetBuilderAndSolver()->BuildRHS(GetScheme(), BaseType::GetModelPart(), rb);
}
return mpConvergenceCriteria->PostCriteria(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);
KRATOS_CATCH("");
}
/**
 * @brief This operations should be called before printing the results when non trivial results
 * (e.g. stresses)
 * Need to be calculated given the solution of the step
 * @details This operations should be called only when needed, before printing as it can involve a non
 * negligible cost
 */
void CalculateOutputData() override
{
// Delegates entirely to the scheme, passing the current system state
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
GetScheme()->CalculateOutputData(BaseType::GetModelPart(),
GetBuilderAndSolver()->GetDofSet(),
rA, rDx, rb);
}
/**
* @brief Performs all the required operations that should be done (for each step) before solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
void InitializeSolutionStep() override
{
    KRATOS_TRY;
    // Heavy setup below runs only once per step: guarded by the member flag,
    // which FinalizeSolutionStep resets
    if (!mSolutionStepIsInitialized) {
        // Pointers needed in the solution
        typename TSchemeType::Pointer p_scheme = GetScheme();
        typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
        ModelPart& r_model_part = BaseType::GetModelPart();
        //set up the system, operation performed just once unless it is required
        //to reform the dof set at each iteration
        BuiltinTimer system_construction_time;
        if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false ||
            mReformDofSetAtEachStep == true)
        {
            //setting up the list of the DOFs to be solved
            BuiltinTimer setup_dofs_time;
            p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part);
            KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "Setup Dofs Time: "
                << setup_dofs_time.ElapsedSeconds() << std::endl;
            //shaping correctly the system (equation ordering)
            BuiltinTimer setup_system_time;
            p_builder_and_solver->SetUpSystem(r_model_part);
            KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "Setup System Time: "
                << setup_system_time.ElapsedSeconds() << std::endl;
            //setting up the Vectors involved to the correct size
            BuiltinTimer system_matrix_resize_time;
            p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb,
                                                             r_model_part);
            KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "System Matrix Resize Time: "
                << system_matrix_resize_time.ElapsedSeconds() << std::endl;
        }
        KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "System Construction Time: "
            << system_construction_time.ElapsedSeconds() << std::endl;
        TSystemMatrixType& rA = *mpA;
        TSystemVectorType& rDx = *mpDx;
        TSystemVectorType& rb = *mpb;
        // Initial operations ... things that are constant over the Solution Step
        p_builder_and_solver->InitializeSolutionStep(r_model_part, rA, rDx, rb);
        // Initial operations ... things that are constant over the Solution Step
        p_scheme->InitializeSolutionStep(r_model_part, rA, rDx, rb);
        // Initialisation of the convergence criteria: build the residual first
        // when the criteria works on an up-to-date RHS
        if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
        {
            TSparseSpace::SetToZero(rb);
            p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
        }
        mpConvergenceCriteria->InitializeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
        // Leave the RHS zeroed again after the criteria initialization
        if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
            TSparseSpace::SetToZero(rb);
        mSolutionStepIsInitialized = true;
    }
    KRATOS_CATCH("");
}
/**
* @brief Performs all the required operations that should be done (for each step) after solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
void FinalizeSolutionStep() override
{
    KRATOS_TRY;
    ModelPart& r_model_part = BaseType::GetModelPart();
    typename TSchemeType::Pointer p_scheme = GetScheme();
    typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
    TSystemMatrixType& rA = *mpA;
    TSystemVectorType& rDx = *mpDx;
    TSystemVectorType& rb = *mpb;
    //Finalisation of the solution step,
    //operations to be done after achieving convergence, for example the
    //Final Residual Vector (mb) has to be saved in there
    //to avoid error accumulation
    // NOTE: scheme, builder-and-solver and convergence criteria are finalized
    // in this fixed order
    p_scheme->FinalizeSolutionStep(r_model_part, rA, rDx, rb);
    p_builder_and_solver->FinalizeSolutionStep(r_model_part, rA, rDx, rb);
    mpConvergenceCriteria->FinalizeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
    //Cleaning memory after the solution
    p_scheme->Clean();
    //reset flags for next step (InitializeSolutionStep will run again)
    mSolutionStepIsInitialized = false;
    if (mReformDofSetAtEachStep == true) //deallocate the systemvectors
    {
        this->Clear();
    }
    KRATOS_CATCH("");
}
/**
* @brief Solves the current step. This function returns true if a solution has been found, false otherwise.
*/
bool SolveSolutionStep() override
{
    // Pointers needed in the solution
    ModelPart& r_model_part = BaseType::GetModelPart();
    typename TSchemeType::Pointer p_scheme = GetScheme();
    typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
    auto& r_dof_set = p_builder_and_solver->GetDofSet();
    TSystemMatrixType& rA = *mpA;
    TSystemVectorType& rDx = *mpDx;
    TSystemVectorType& rb = *mpb;
    //initializing the parameters of the Newton-Raphson cycle
    unsigned int iteration_number = 1;
    r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
    bool residual_is_updated = false;
    // ---- first (prediction) iteration, performed outside the loop below ----
    p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
    mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
    bool is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
    // Function to perform the building and the solving phase.
    if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false) {
        TSparseSpace::SetToZero(rA);
        TSparseSpace::SetToZero(rDx);
        TSparseSpace::SetToZero(rb);
        if (mUseOldStiffnessInFirstIteration){
            p_builder_and_solver->BuildAndSolveLinearizedOnPreviousIteration(p_scheme, r_model_part, rA, rDx, rb,BaseType::MoveMeshFlag());
        } else {
            p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
        }
    } else {
        TSparseSpace::SetToZero(rDx); // Dx = 0.00;
        TSparseSpace::SetToZero(rb);
        p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
    }
    // Debugging info
    EchoInfo(iteration_number);
    // Updating the results stored in the database
    UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag());
    p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
    mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
    if (is_converged) {
        if (mpConvergenceCriteria->GetActualizeRHSflag()) {
            TSparseSpace::SetToZero(rb);
            p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
        }
        is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
    }
    //Iteration Cycle... performed only for NonLinearProblems
    while (is_converged == false &&
           iteration_number++ < mMaxIterationNumber)
    {
        //setting the number of iteration
        r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
        p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
        mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
        is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
        //call the linear system solver to find the correction mDx for the
        //it is not called if there is no system to solve
        if (SparseSpaceType::Size(rDx) != 0)
        {
            // Rebuild the LHS only when the rebuild level asks for it and the
            // "keep system constant" flag is not set
            if (BaseType::mRebuildLevel > 1 || BaseType::mStiffnessMatrixIsBuilt == false)
            {
                if (GetKeepSystemConstantDuringIterations() == false)
                {
                    //A = 0.00;
                    TSparseSpace::SetToZero(rA);
                    TSparseSpace::SetToZero(rDx);
                    TSparseSpace::SetToZero(rb);
                    p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                }
                else
                {
                    TSparseSpace::SetToZero(rDx);
                    TSparseSpace::SetToZero(rb);
                    p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                }
            }
            else
            {
                TSparseSpace::SetToZero(rDx);
                TSparseSpace::SetToZero(rb);
                p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
            }
        }
        else
        {
            KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl;
        }
        // Debugging info
        EchoInfo(iteration_number);
        // Updating the results stored in the database
        UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag());
        p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
        mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
        residual_is_updated = false;
        if (is_converged == true)
        {
            if (mpConvergenceCriteria->GetActualizeRHSflag() == true)
            {
                TSparseSpace::SetToZero(rb);
                p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
                residual_is_updated = true;
            }
            is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
        }
    }
    //plots a warning if the maximum number of iterations is exceeded
    if (iteration_number >= mMaxIterationNumber) {
        MaxIterationsExceeded();
    } else {
        KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0)
            << "Convergence achieved after " << iteration_number << " / "
            << mMaxIterationNumber << " iterations" << std::endl;
    }
    //recalculate residual if needed
    //(note that some convergence criteria need it to be recalculated)
    if (residual_is_updated == false)
    {
        // NOTE:
        // The following part will be commented because it is time consuming
        // and there is no obvious reason to be here. If someone need this
        // part please notify the community via mailing list before uncommenting it.
        // Pooyan.
        // TSparseSpace::SetToZero(mb);
        // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
    }
    //calculate reactions if required
    if (mCalculateReactionsFlag == true)
        p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
    return is_converged;
}
/**
* @brief Function to perform expensive checks.
* @details It is designed to be called ONCE to verify that the input is correct.
*/
int Check() override
{
KRATOS_TRY
BaseType::Check();
GetBuilderAndSolver()->Check(BaseType::GetModelPart());
GetScheme()->Check(BaseType::GetModelPart());
mpConvergenceCriteria->Check(BaseType::GetModelPart());
return 0;
KRATOS_CATCH("")
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
// Defaults specific to this strategy; the raw string is parsed by Parameters
Parameters default_parameters = Parameters(R"(
{
"name" : "newton_raphson_strategy",
"use_old_stiffness_in_first_iteration": false,
"max_iteration" : 10,
"reform_dofs_at_each_step" : false,
"compute_reactions" : false,
"builder_and_solver_settings" : {},
"convergence_criteria_settings" : {},
"linear_solver_settings" : {},
"scheme_settings" : {}
})");
// Getting base class default parameters (merged without overwriting the above)
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
    // Snake-case identifier used to select this strategy from settings
    return std::string("newton_raphson_strategy");
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
/**
* @brief This method returns the LHS matrix
* @return The LHS matrix
*/
TSystemMatrixType &GetSystemMatrix() override
{
    // Expose the LHS matrix owned by this strategy
    return *mpA;
}
/**
* @brief This method returns the RHS vector
* @return The RHS vector
*/
TSystemVectorType& GetSystemVector() override
{
    // Expose the RHS vector owned by this strategy
    return *mpb;
}
/**
* @brief This method returns the solution vector
* @return The Dx vector
*/
TSystemVectorType& GetSolutionVector() override
{
    // Expose the solution-increment vector owned by this strategy
    return *mpDx;
}
/**
* @brief Set method for the flag mKeepSystemConstantDuringIterations
* @param Value If we consider constant the system of equations during the iterations
*/
void SetKeepSystemConstantDuringIterations(bool Value)
{
    // When true, SolveSolutionStep skips rebuilding the LHS inside the NR loop
    mKeepSystemConstantDuringIterations = Value;
}
/**
* @brief Get method for the flag mKeepSystemConstantDuringIterations
* @return True if we consider constant the system of equations during the iterations, false otherwise
*/
bool GetKeepSystemConstantDuringIterations()
{
return mKeepSystemConstantDuringIterations;
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    // Class identifier used by PrintInfo/PrintData below
    return "ResidualBasedNewtonRaphsonStrategy";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
    // Writes only the class identifier to the stream
    rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
    // No per-instance data is printed; same output as PrintInfo
    rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
private:
///@name Private static Member Variables
///@{
///@}
///@name Private member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Private LifeCycle
///@{
///@}
protected:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
typename TSchemeType::Pointer mpScheme = nullptr; /// The pointer to the time scheme employed
typename TBuilderAndSolverType::Pointer mpBuilderAndSolver = nullptr; /// The pointer to the builder and solver employed
typename TConvergenceCriteriaType::Pointer mpConvergenceCriteria = nullptr; /// The pointer to the convergence criteria employed
TSystemVectorPointerType mpDx; /// The increment in the solution
TSystemVectorPointerType mpb; /// The RHS vector of the system of equations
TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations
/**
 * @brief Flag telling if it is needed to reform the DofSet at each
solution step or if it is possible to form it just once
 * @details Default = false
    - true  : Reform at each time step
    - false : Form just once (more efficient)
 */
bool mReformDofSetAtEachStep;
/**
 * @brief Flag telling if it is needed or not to compute the reactions
 * @details default = true
 */
bool mCalculateReactionsFlag;
/**
 * @brief Flag telling if a full update of the database will be performed at the first iteration
 * @details default = false
 */
bool mUseOldStiffnessInFirstIteration = false;
bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step
unsigned int mMaxIterationNumber; /// The maximum number of iterations, 30 by default
bool mInitializeWasPerformed; /// Flag to set as initialized the strategy
bool mKeepSystemConstantDuringIterations; // Flag to allow keeping system matrix constant during iterations
///@}
///@name Protected Operators
///@{
/**
* @brief Here the database is updated
* @param A The LHS matrix of the system of equations
* @param Dx The incremement in the solution
* @param b The RHS vector of the system of equations
* @param MoveMesh The flag that allows to move the mesh
*/
virtual void UpdateDatabase(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb,
    const bool MoveMesh)
{
    // Let the scheme apply the correction rDx to the nodal database
    GetScheme()->Update(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);

    // Update the node coordinates when the mesh follows the solution
    if (MoveMesh)
        BaseType::MoveMesh();
}
/**
* @brief This method returns the components of the system of equations depending of the echo level
* @param IterationNumber The non linear iteration in the solution loop
*/
virtual void EchoInfo(const unsigned int IterationNumber)
{
    TSystemMatrixType& rA = *mpA;
    TSystemVectorType& rDx = *mpDx;
    TSystemVectorType& rb = *mpb;
    // echo level: 2 -> print Dx and RHS, 3 -> additionally the LHS,
    // 4 -> dump the system to MatrixMarket files named by TIME and iteration
    if (this->GetEchoLevel() == 2) //if it is needed to print the debug info
    {
        KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl;
        KRATOS_INFO("RHS") << "RHS = " << rb << std::endl;
    }
    else if (this->GetEchoLevel() == 3) //if it is needed to print the debug info
    {
        KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl;
        KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl;
        KRATOS_INFO("RHS") << "RHS = " << rb << std::endl;
    }
    else if (this->GetEchoLevel() == 4) //print to matrix market file
    {
        std::stringstream matrix_market_name;
        matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm";
        TSparseSpace::WriteMatrixMarketMatrix((char *)(matrix_market_name.str()).c_str(), rA, false);
        std::stringstream matrix_market_vectname;
        matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm.rhs";
        TSparseSpace::WriteMatrixMarketVector((char *)(matrix_market_vectname.str()).c_str(), rb);
    }
}
/**
* @brief This method prints information after reach the max number of iterations
*/
virtual void MaxIterationsExceeded()
{
    // Warning only emitted when the echo level is above 0
    KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0)
        << "ATTENTION: max iterations ( " << mMaxIterationNumber
        << " ) exceeded!" << std::endl;
}
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
    // Base class settings first, then the strategy-specific flags
    BaseType::AssignSettings(ThisParameters);
    mMaxIterationNumber = ThisParameters["max_iteration"].GetInt();
    mReformDofSetAtEachStep = ThisParameters["reform_dofs_at_each_step"].GetBool();
    mCalculateReactionsFlag = ThisParameters["compute_reactions"].GetBool();
    mUseOldStiffnessInFirstIteration = ThisParameters["use_old_stiffness_in_first_iteration"].GetBool();
    // Saving the convergence criteria to be used
    // (component construction from settings is not implemented yet: fail loudly)
    if (ThisParameters["convergence_criteria_settings"].Has("name")) {
        KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
    }
    // Saving the scheme
    if (ThisParameters["scheme_settings"].Has("name")) {
        KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
    }
    // Setting up the default builder and solver
    if (ThisParameters["builder_and_solver_settings"].Has("name")) {
        KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
    }
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/**
* Copy constructor.
*/
ResidualBasedNewtonRaphsonStrategy(const ResidualBasedNewtonRaphsonStrategy &Other){}; // NOTE(review): private empty-bodied copy ctor forbids external copying; a copy made by a friend would leave members default-initialized — consider '= delete', confirm no friend relies on it
///@}
}; /* Class ResidualBasedNewtonRaphsonStrategy */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos. */
#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY defined */
|
GB_binop__times_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint32)
// A*D function (colscale): GB (_AxD__times_uint32)
// D*A function (rowscale): GB (_DxB__times_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__times_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__times_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint32)
// C=scalar+B GB (_bind1st__times_uint32)
// C=scalar+B' GB (_bind1st_tran__times_uint32)
// C=A+scalar GB (_bind2nd__times_uint32)
// C=A'+scalar GB (_bind2nd_tran__times_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_UINT32 || GxB_NO_TIMES_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // all work is done by the shared template, specialized by the macros above
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE)
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B where C is dense and B is sparse; B is pre-sliced into
    // B_ntasks tasks via B_ek_slicing
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// p_bwork points to the scalar b, of type uint32_t.
GrB_Info GB (_Cdense_accumb__times_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        // single return here; the duplicate unreachable return that followed
        // this block has been removed
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D: scale the columns of A by the diagonal matrix D
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    // C = D*B: scale the rows of B by the diagonal matrix D
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd C = A+B (optionally masked by M), using the TIMES operator
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A and B by entries; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__times_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult C = A.*B (optionally masked), general case
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult when A is sparse/hyper and B is bitmap/full
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for TIMES, so only this branch is compiled here.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult C<M> = A.*B when M is sparse/hyper and A, B are bitmap/full
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult where the result C is held as a bitmap
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_uint32)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = x * Bx [p] for all p present in the bitmap Bb
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_uint32)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = Ax [p] * y for all p present in the bitmap Ab
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB (_bind1st_tran__times_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = x * A': transpose A and apply the operator with x bound first.
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its prior definition for the rest of this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB (_bind2nd_tran__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = A' * y: transpose A and apply the operator with y bound second
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
graph.h | // copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <stdio.h>
#include <cinttypes>
#include <iostream>
#include <type_traits>
#include <map>
#include "pvector.h"
#include "util.h"
#include "segmentgraph.h"
#include <memory>
#include <assert.h>
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_=int32_t, typename WeightT_=int32_t>
struct NodeWeight {
  NodeID_ v;    // destination node id
  WeightT_ w;   // edge weight
  // default ctor intentionally leaves members uninitialized (cheap bulk allocation)
  NodeWeight() {}
  NodeWeight(NodeID_ v) : v(v), w(1) {}
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
  // orders by node id first, breaking ties by weight
  bool operator< (const NodeWeight& rhs) const {
    return v == rhs.v ? w < rhs.w : v < rhs.v;
  }
  // doesn't check WeightT_s, needed to remove duplicate edges
  bool operator== (const NodeWeight& rhs) const {
    return v == rhs.v;
  }
  // doesn't check WeightT_s, needed to remove self edges
  bool operator== (const NodeID_& rhs) const {
    return v == rhs;
  }
  // const-qualified so the conversion also works on const objects/references
  operator NodeID_() const {
    return v;
  }
};
// Stream insertion: writes a NodeWeight as "v w" (space separated).
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
                         const NodeWeight<NodeID_, WeightT_>& nw) {
  return os << nw.v << " " << nw.w;
}
// Stream extraction: reads a node id followed by its weight.
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
  return is >> nw.v >> nw.w;
}
// Syntactic sugar for an edge: a (source, destination) pair.
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
  SrcT u;   // source endpoint
  DstT v;   // destination endpoint
  // Left empty on purpose: endpoints stay uninitialized for bulk allocation.
  EdgePair() {}
  EdgePair(SrcT u, DstT v) : u(u), v(v) {}
};
// SG = serialized graph; these aliases fix the on-disk representation.
using SGID = int32_t;            // node id as written to file
using SGEdge = EdgePair<SGID>;   // edge as written to file
using SGOffset = int64_t;        // CSR offset as written to file
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
// Used to access neighbors of vertex, basically sugar for iterators
class Neighborhood {
NodeID_ n_;
DestID_** g_index_;
public:
Neighborhood(NodeID_ n, DestID_** g_index) : n_(n), g_index_(g_index) {}
typedef DestID_* iterator;
iterator begin() { return g_index_[n_]; }
iterator end() { return g_index_[n_+1]; }
};
void ReleaseResources() {
//added a second condition to prevent double free (transpose graphs)
/*
if (out_index_ != nullptr)
delete[] out_index_;
if (out_neighbors_ != nullptr)
delete[] out_neighbors_;
if (directed_) {
if (in_index_ != nullptr && in_index_ != out_index_)
delete[] in_index_;
if (in_neighbors_ != nullptr && in_neighbors_ != out_neighbors_)
delete[] in_neighbors_;
}
if (flags_ != nullptr)
delete[] flags_;
*/
out_index_shared_.reset();
out_neighbors_shared_.reset();
in_index_shared_.reset();
in_neighbors_shared_.reset();
flags_shared_.reset();
offsets_shared_.reset();
for (auto iter = label_to_segment.begin(); iter != label_to_segment.end(); iter++) {
delete ((*iter).second);
}
}
public:
#ifndef IGNORE_JULIENNE_TYPES
julienne::graph<julienne::symmetricVertex> julienne_graph = __julienne_null_graph;
//julienne::EdgeMap<julienne::uintE, julienne::symmetricVertex> *em;
#endif
CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
out_index_(nullptr), out_neighbors_(nullptr),
in_index_(nullptr), in_neighbors_(nullptr), flags_(nullptr), is_transpose_(false) {}
CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
directed_(false), num_nodes_(num_nodes),
out_index_(index), out_neighbors_(neighs),
in_index_(index), in_neighbors_(neighs){
out_index_shared_.reset(index);
out_neighbors_shared_.reset(neighs);
in_index_shared_ = out_index_shared_;
in_neighbors_shared_ = out_neighbors_shared_;
num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
//adding flags used for deduplication
flags_ = new int[num_nodes_];
flags_shared_.reset(flags_);
//adding offsets for load balacne scheme
SetUpOffsets(true);
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs), is_transpose_(false){
num_edges_ = out_index_[num_nodes_] - out_index_[0];
out_index_shared_.reset(out_index);
out_neighbors_shared_.reset(out_neighs);
in_index_shared_.reset(in_index);
in_neighbors_shared_.reset(in_neighs);
flags_ = new int[num_nodes_];
flags_shared_.reset(flags_);
SetUpOffsets(true);
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs, bool is_transpose) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs) , is_transpose_(is_transpose){
num_edges_ = out_index_[num_nodes_] - out_index_[0];
out_index_shared_.reset(out_index);
out_neighbors_shared_.reset(out_neighs);
in_index_shared_.reset(in_index);
in_neighbors_shared_.reset(in_neighs);
flags_ = new int[num_nodes_];
flags_shared_.reset(flags_);
SetUpOffsets(true);
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(int64_t num_nodes, std::shared_ptr<DestID_*> out_index, std::shared_ptr<DestID_> out_neighs,
shared_ptr<DestID_*> in_index, shared_ptr<DestID_> in_neighs, bool is_transpose) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index.get()), out_neighbors_(out_neighs.get()),
in_index_(in_index.get()), in_neighbors_(in_neighs.get()) , is_transpose_(is_transpose){
num_edges_ = out_index_[num_nodes_] - out_index_[0];
out_index_shared_ = (out_index);
out_neighbors_shared_ = (out_neighs);
in_index_shared_ = (in_index);
in_neighbors_shared_ = (in_neighs);
flags_ = new int[num_nodes_];
flags_shared_.reset(flags_);
SetUpOffsets(true);
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(CSRGraph& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_), is_transpose_(false){
/* Commenting this because object is not taking owner ship of the elements, notice destructor_free is set to false
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
other.flags_ = nullptr;
other.offsets_ = nullptr;
*/
out_index_shared_ = other.out_index_shared_;
out_neighbors_shared_ = other.out_neighbors_shared_;
in_index_shared_ = other.in_index_shared_;
in_neighbors_shared_ = other.in_neighbors_shared_;
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(CSRGraph&& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_), is_transpose_(false){
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
other.flags_ = nullptr;
other.offsets_ = nullptr;
out_index_shared_ = other.out_index_shared_;
out_neighbors_shared_ = other.out_neighbors_shared_;
in_index_shared_ = other.in_index_shared_;
in_neighbors_shared_ = other.in_neighbors_shared_;
other.out_index_shared_.reset();
other.out_neighbors_shared_.reset();
other.in_index_shared_.reset();
other.in_neighbors_shared_.reset();
other.flags_shared_.reset();
other.offsets_shared_.reset();
//Set this up for getting random neighbors
srand(time(NULL));
}
~CSRGraph() {
if (!is_transpose_)
ReleaseResources();
}
CSRGraph& operator=(CSRGraph& other) {
if (this != &other) {
if (!is_transpose_)
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
out_index_shared_ = other.out_index_shared_;
out_neighbors_shared_ = other.out_neighbors_shared_;
in_index_shared_ = other.in_index_shared_;
in_neighbors_shared_ = other.in_neighbors_shared_;
//need the following, otherwise would get double free errors
/*
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
other.flags_ = nullptr;
other.offsets_ = nullptr;
*/
}
return *this;
}
CSRGraph& operator=(CSRGraph&& other) {
if (this != &other) {
if (!is_transpose_ )
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
out_index_shared_ = other.out_index_shared_;
out_neighbors_shared_ = other.out_neighbors_shared_;
in_index_shared_ = other.in_index_shared_;
in_neighbors_shared_ = other.in_neighbors_shared_;
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
other.flags_ = nullptr;
other.offsets_ = nullptr;
other.out_index_shared_.reset();
other.out_neighbors_shared_.reset();
other.in_index_shared_.reset();
other.in_neighbors_shared_.reset();
other.flags_shared_.reset();
other.offsets_shared_.reset();
}
return *this;
}
bool directed() const {
return directed_;
}
int64_t num_nodes() const {
return num_nodes_;
}
int64_t num_edges() const {
return num_edges_;
}
int64_t num_edges_directed() const {
return directed_ ? num_edges_ : 2*num_edges_;
}
int64_t out_degree(NodeID_ v) const {
return out_index_[v+1] - out_index_[v];
}
int64_t in_degree(NodeID_ v) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return in_index_[v+1] - in_index_[v];
}
Neighborhood out_neigh(NodeID_ n) const {
return Neighborhood(n, out_index_);
}
Neighborhood in_neigh(NodeID_ n) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return Neighborhood(n, in_index_);
}
NodeID_ get_random_out_neigh(NodeID_ n) {
int num_nghs = out_degree(n);
assert(num_nghs!=0);
int rand_index = rand() % num_nghs;
return out_index_[n][rand_index];
}
NodeID_ get_random_in_neigh(NodeID_ n) {
int num_nghs = in_degree(n);
assert(num_nghs!=0);
int rand_index = rand() % num_nghs;
return in_index_[n][rand_index];
}
void PrintStats() const {
std::cout << "Graph has " << num_nodes_ << " nodes and "
<< num_edges_ << " ";
if (!directed_)
std::cout << "un";
std::cout << "directed edges for degree: ";
std::cout << num_edges_/num_nodes_ << std::endl;
}
void PrintTopology() const {
for (NodeID_ i=0; i < num_nodes_; i++) {
std::cout << i << ": ";
for (DestID_ j : out_neigh(i)) {
std::cout << j << " ";
}
std::cout << std::endl;
}
}
static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
pvector<SGOffset> offsets(num_nodes_+1);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets[n] = in_index_[n] - in_index_[0];
else
offsets[n] = out_index_[n] - out_index_[0];
return offsets;
}
void SetUpOffsets(bool in_graph = false) {
offsets_ = new SGOffset[num_nodes_+1];
offsets_shared_.reset(offsets_);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets_[n] = in_index_[n] - in_index_[0];
else
offsets_[n] = out_index_[n] - out_index_[0];
}
Range<NodeID_> vertices() const {
return Range<NodeID_>(num_nodes());
}
SegmentedGraph<DestID_, NodeID_>* getSegmentedGraph(std::string label, int id) {
return label_to_segment[label]->getSegmentedGraph(id);
}
int getNumSegments(std::string label) {
return label_to_segment[label]->numSegments;
}
void buildPullSegmentedGraphs(std::string label, int numSegments, bool numa_aware=false, std::string path="") {
auto graphSegments = new GraphSegments<DestID_,NodeID_>(numSegments, numa_aware);
label_to_segment[label] = graphSegments;
#ifdef LOADSEG
cout << "loading segmented graph from " << path << endl;
#pragma omp parallel for num_threads(numSegments)
for (int i = 0; i < numSegments; i++) {
FILE *in;
in = fopen((path + "/" + std::to_string(i)).c_str(), "r");
auto sg = graphSegments->getSegmentedGraph(i);
fread((void *) &sg->numVertices, sizeof(sg->numVertices), 1, in);
fread((void *) &sg->numEdges, sizeof(sg->numEdges), 1, in);
sg->allocate(i);
fread((void *) sg->graphId, sizeof(*sg->graphId), sg->numVertices, in);
fread((void *) sg->edgeArray, sizeof(*sg->edgeArray), sg->numEdges, in);
fread((void *) sg->vertexArray, sizeof(*sg->vertexArray), sg->numVertices + 1, in);
fclose(in);
}
return;
#endif
int segmentRange = (num_nodes() + numSegments - 1) / numSegments;
//Go through the original graph and count the number of target vertices and edges for each segment
for (auto d : vertices()){
for (auto s : in_neigh(d)){
int segment_id;
if (std::is_same<DestID_, NodeWeight<>>::value)
segment_id = static_cast<NodeWeight<>>(s).v/segmentRange;
else
segment_id = s/segmentRange;
graphSegments->getSegmentedGraph(segment_id)->countEdge(d);
}
}
//Allocate each segment
graphSegments->allocate();
//Add the edges for each segment
for (auto d : vertices()){
for (auto s : in_neigh(d)){
int segment_id;
if (std::is_same<DestID_, NodeWeight<>>::value)
segment_id = static_cast<NodeWeight<>>(s).v/segmentRange;
else
segment_id = s/segmentRange;
graphSegments->getSegmentedGraph(segment_id)->addEdge(d, s);
}
}
#ifdef STORESEG
cout << "output serialized graph segments to " << path << endl;
#pragma omp parallel for num_threads(numSegments)
for(int i = 0; i < numSegments; i++) {
FILE *out = fopen((path + "/" + std::to_string(i)).c_str(), "w");
auto sg = graphSegments->getSegmentedGraph(i);
fwrite((void *) &sg->numVertices, sizeof(sg->numVertices), 1, out);
fwrite((void *) &sg->numEdges, sizeof(sg->numEdges), 1, out);
fwrite((void *) sg->graphId, sizeof(*sg->graphId), sg->numVertices, out);
fwrite((void *) sg->edgeArray, sizeof(*sg->edgeArray), sg->numEdges, out);
fwrite((void *) sg->vertexArray, sizeof(*sg->vertexArray), sg->numVertices + 1, out);
fclose(out);
}
#endif
}
private:
// Making private so cannot be modified from outside
//useful for deduplication
int* flags_;
SGOffset * offsets_;
bool is_transpose_;
bool directed_;
int64_t num_nodes_;
int64_t num_edges_;
DestID_** out_index_;
DestID_* out_neighbors_;
DestID_** in_index_;
DestID_* in_neighbors_;
public:
std::shared_ptr<int> flags_shared_;
std::shared_ptr<SGOffset> offsets_shared_;
std::shared_ptr<DestID_*> out_index_shared_;
std::shared_ptr<DestID_> out_neighbors_shared_;
std::shared_ptr<DestID_*> in_index_shared_;
std::shared_ptr<DestID_> in_neighbors_shared_;
std::map<std::string, GraphSegments<DestID_,NodeID_>*> label_to_segment;
DestID_** get_out_index_(void) {
return out_index_;
}
DestID_* get_out_neighbors_(void) {
return out_neighbors_;
}
DestID_** get_in_index_(void) {
return in_index_;
}
DestID_* get_in_neighbors_(void) {
return in_neighbors_;
}
inline int* get_flags_() {
return flags_;
}
inline void set_flags_(int *flags) {
flags_ = flags;
flags_shared_.reset(flags);
}
inline SGOffset * get_offsets_(void) {
return offsets_;
}
};
#endif // GRAPH_H_
|
compute_kernels.c | /*
*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright (C) 2016, ARM Limited and contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*
*/
#include "compute_kernels.h"
/*
 * Phase 1: blocked streaming arithmetic kernel.
 * Sweeps vals[]/int_vals[] in blocks of block_size, applying a fixed mix of
 * floating-point and integer multiply/add/divide work.  The temp* scratch
 * values arrive as parameters (marked register) rather than locals.
 * When validation_phase is nonzero the OpenMP if-clause forces a serial run
 * so the results are reproducible for checking.
 */
void phase1_compute(const int num_iterations, const int array_size,
    const int block_size, register double temp1, register double temp2,
    register double temp3, register int int_temp1, register int int_temp2,
    register int int_temp3, double *vals, int *int_vals, int validation_phase,
    int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase1_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , double *valid_red_vals, int *valid_red_int_vals
#endif
    ) {
/* Scratch temps are thread-private; the data arrays are shared. */
#pragma omp parallel private (temp1, temp2, temp3, \
    int_temp1, int_temp2, int_temp3) shared(vals, int_vals) \
    if (!validation_phase) num_threads(num_threads)
  {
#if ENABLE_BINDING
    /* Pin this thread to a fixed CPU, or to the next available one. */
    if (bind_to_cpu_set) {
      bind_to_cpu_w_reset(phase1_cpu_id, num_cpus, 0);
    } else {
#ifdef _OPENMP
      bind_to_available_cpu_w_reset(phase1_cpu_id, num_cpus, 0,
          omp_get_thread_num());
#endif
    }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Start per-thread hardware counters (PAPI calls serialized). */
#pragma omp critical
    {
      int retval;
      if ((retval = PAPI_start_counters(papi_info->event_code,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to start counters %d: %s\n", retval, handle_error(retval));
      }
    }
#endif
    for (int iter = 0; iter < num_iterations; ++iter) {
#pragma omp for simd
      for (int i = 0; i < array_size; i += block_size) {
        for (int j = i; j < i + block_size; ++j) {
          /* FP chain: square, add, scaled divide, subtract. */
          temp1 = vals[j];
          int_temp1 = int_vals[j];
          temp1 *= temp1;
          temp2 = temp1 + vals[j];
          temp3 = temp2 / (1024 + temp1);
          temp3 -= vals[j];
          /* Integer chain mirrors the FP chain (integer divide truncates). */
          int_temp1 *= int_temp1;
          int_temp2 = int_temp1 + int_vals[j];
          int_temp3 = int_temp2 / (1024 + int_temp1);
          int_temp3 -= int_vals[j];
          vals[j] = temp3;
          int_vals[j] += (int_temp1 + int_temp2 + int_temp3) % 1024;
#if RED_VALIDATION
          /* Record running values for the reduced validation pass. */
          valid_red_vals[j] = vals[j];
          valid_red_int_vals[j] = int_vals[j];
#endif
        }
      }
    }
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Stop counters and report each event's count for this thread. */
#pragma omp critical
    {
      int retval;
      unsigned long long event_values[papi_info->total_events];
      memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
      if ((retval = PAPI_stop_counters(event_values,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
      }
      for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
        printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
        printf("Thread %d %s value = %lld\n", 0,
#endif
            papi_info->event_code_str[i],
            event_values[i]);
      }
    }
#endif
  }
}
/*
 * Phase 2: indirect (gathered) multiply-accumulate.
 * dest[i] += src1[i] * src2[ind_src2[i]] — one indexed load per element,
 * stressing irregular memory access.  Serial when validation_phase != 0.
 */
void phase2_compute(const int num_iterations, const int array_size,
    double *dest, double *src1, double *src2, int *ind_src2,
    int validation_phase, int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase2_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , double *valid_red_vals
#endif
    ) {
#pragma omp parallel shared(dest, src1, src2, ind_src2) \
    if (!validation_phase) num_threads(num_threads)
  {
#if ENABLE_BINDING
    /* Pin this thread to a fixed CPU, or to the next available one. */
    if (bind_to_cpu_set) {
      bind_to_cpu_w_reset(phase2_cpu_id, num_cpus, 0);
    } else {
#ifdef _OPENMP
      bind_to_available_cpu_w_reset(phase2_cpu_id, num_cpus, 0,
          omp_get_thread_num());
#endif
    }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Start per-thread hardware counters (PAPI calls serialized). */
#pragma omp critical
    {
      int retval;
      if ((retval = PAPI_start_counters(papi_info->event_code,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to start counters %d: %s\n", retval, handle_error(retval));
      }
    }
#endif
    for (int iter = 0; iter < num_iterations; ++iter) {
#pragma omp for
      for (int i = 0; i < array_size; ++i) {
        dest[i] += src1[i] * src2[ind_src2[i]];
#if RED_VALIDATION
        /* Record running values for the reduced validation pass. */
        valid_red_vals[i] = dest[i];
#endif
      }
    }
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Stop counters and report each event's count for this thread. */
#pragma omp critical
    {
      int retval;
      unsigned long long event_values[papi_info->total_events];
      memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
      if ((retval = PAPI_stop_counters(event_values,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
      }
      for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
        printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
        printf("Thread %d %s value = %lld\n", 0,
#endif
            papi_info->event_code_str[i],
            event_values[i]);
      }
    }
#endif
  }
}
/*
 * Phase 3: parallel sum reduction followed by a broadcast.
 * Each iteration bumps every element by 8, reduces the array into
 * tmp_reduction_var, folds the sum into [0, 1024) via fmod, and then
 * rewrites the whole array with that scalar.
 */
void phase3_compute(const int num_iterations, const int array_size,
    double *vals, double *reduction_var, int validation_phase, int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase3_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , double *valid_red_reduction_var
#endif
    ) {
  double tmp_reduction_var = 0;
#pragma omp parallel shared(vals, tmp_reduction_var) if (!validation_phase) \
    num_threads(num_threads)
  {
#if ENABLE_BINDING
    /* Pin this thread to a fixed CPU, or to the next available one. */
    if (bind_to_cpu_set) {
      bind_to_cpu_w_reset(phase3_cpu_id, num_cpus, 0);
    } else {
#ifdef _OPENMP
      bind_to_available_cpu_w_reset(phase3_cpu_id, num_cpus, 0,
          omp_get_thread_num());
#endif
    }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Start per-thread hardware counters (PAPI calls serialized). */
#pragma omp critical
    {
      int retval;
      if ((retval = PAPI_start_counters(papi_info->event_code,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to start counters %d: %s\n", retval, handle_error(retval));
      }
    }
#endif
    for (int iter = 0; iter < num_iterations; ++iter) {
#pragma omp for reduction(+:tmp_reduction_var)
      for (int i = 0; i < array_size; ++i) {
        vals[i] += 8;
        tmp_reduction_var += vals[i];
      }
      /* NOTE(review): tmp_rounding is an int, so this truncates the sum to a
         whole number (and can overflow for large sums).  Presumably intended
         to round to 6 decimal places — confirm against the validation path.
         Also every thread executes this scalar section concurrently on the
         shared tmp_reduction_var/*reduction_var — confirm this is intended. */
      int tmp_rounding = tmp_reduction_var * 1000000;
      tmp_reduction_var = tmp_rounding / 1000000;
      *reduction_var = fmod(tmp_reduction_var, 1024);
#pragma omp for
      for (int i = 0; i < array_size; ++i) {
        /* Broadcast the folded sum back over the whole array. */
        vals[i] = *reduction_var;
      }
#if RED_VALIDATION
      *valid_red_reduction_var = *reduction_var;
#endif
    }
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Stop counters and report each event's count for this thread. */
#pragma omp critical
    {
      int retval;
      unsigned long long event_values[papi_info->total_events];
      memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
      if ((retval = PAPI_stop_counters(event_values,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
      }
      for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
        printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
        printf("Thread %d %s value = %lld\n", 0,
#endif
            papi_info->event_code_str[i],
            event_values[i]);
      }
    }
#endif
  }
}
/*
 * Phase 4: streaming element-wise accumulate, dest[i] += src1[i] + src2[i].
 * Purely sequential memory access; serial when validation_phase != 0.
 */
void phase4_compute(const int num_iterations, const int array_size,
    double *dest, double *src1, double *src2, int validation_phase,
    int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase4_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , double *valid_red_vals
#endif
    ) {
#pragma omp parallel shared(dest, src1, src2) \
    if (!validation_phase) num_threads(num_threads)
  {
#if ENABLE_BINDING
    /* Pin this thread to a fixed CPU, or to the next available one. */
    if (bind_to_cpu_set) {
      bind_to_cpu_w_reset(phase4_cpu_id, num_cpus, 0);
    } else {
#ifdef _OPENMP
      bind_to_available_cpu_w_reset(phase4_cpu_id, num_cpus, 0,
          omp_get_thread_num());
#endif
    }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Start per-thread hardware counters (PAPI calls serialized). */
#pragma omp critical
    {
      int retval;
      if ((retval = PAPI_start_counters(papi_info->event_code,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to start counters %d: %s\n", retval, handle_error(retval));
      }
    }
#endif
    for (int iter = 0; iter < num_iterations; ++iter) {
#pragma omp for
      for (int i = 0; i < array_size; ++i) {
        dest[i] += src1[i] + src2[i];
#if RED_VALIDATION
        /* Record running values for the reduced validation pass. */
        valid_red_vals[i] = dest[i];
#endif
      }
    }
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Stop counters and report each event's count for this thread. */
#pragma omp critical
    {
      int retval;
      unsigned long long event_values[papi_info->total_events];
      memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
      if ((retval = PAPI_stop_counters(event_values,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
      }
      for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
        printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
        printf("Thread %d %s value = %lld\n", 0,
#endif
            papi_info->event_code_str[i],
            event_values[i]);
      }
    }
#endif
  }
}
/*
 * Phase 5: double-gather accumulate, dest[i] += src1[ind_src1[i]] +
 * src2[ind_src2[i]].  Two indexed loads per element — the most irregular
 * access pattern of the array phases.  Serial when validation_phase != 0.
 */
void phase5_compute(const int num_iterations, const int array_size,
    double *dest, double *src1, double *src2, int *ind_src1, int *ind_src2,
    int validation_phase, int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase5_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , double *valid_red_vals
#endif
    ) {
#pragma omp parallel shared(dest, src1, ind_src1, ind_src2, \
    src2) if (!validation_phase) num_threads(num_threads)
  {
#if ENABLE_BINDING
    /* Pin this thread to a fixed CPU, or to the next available one. */
    if (bind_to_cpu_set) {
      bind_to_cpu_w_reset(phase5_cpu_id, num_cpus, 0);
    } else {
#ifdef _OPENMP
      bind_to_available_cpu_w_reset(phase5_cpu_id, num_cpus, 0,
          omp_get_thread_num());
#endif
    }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Start per-thread hardware counters (PAPI calls serialized). */
#pragma omp critical
    {
      int retval;
      if ((retval = PAPI_start_counters(papi_info->event_code,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to start counters %d: %s\n", retval, handle_error(retval));
      }
    }
#endif
    for (int iter = 0; iter < num_iterations; ++iter) {
#pragma omp for
      for (int i = 0; i < array_size; ++i) {
        dest[i] += src1[ind_src1[i]] + src2[ind_src2[i]];
#if RED_VALIDATION
        /* Record running values for the reduced validation pass. */
        valid_red_vals[i] = dest[i];
#endif
      }
    }
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Stop counters and report each event's count for this thread. */
#pragma omp critical
    {
      int retval;
      unsigned long long event_values[papi_info->total_events];
      memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
      if ((retval = PAPI_stop_counters(event_values,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
      }
      for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
        printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
        printf("Thread %d %s value = %lld\n", 0,
#endif
            papi_info->event_code_str[i],
            event_values[i]);
      }
    }
#endif
  }
}
/*
 * Phase 6: sparse matrix-vector multiply (row-wise, per-row value/index
 * arrays).  vect_out[i] = sum_j values[i][j] * vect_in[cols[i][j]].
 * Runs num_iterations/5 sweeps (heavier per-iteration cost than the other
 * phases).  The restrict qualifiers promise no aliasing within a row.
 */
void phase6_compute(const int num_iterations, const int nrow,
    double **sparse_matrix_values, double *vect_in,
    int **sparse_matrix_indeces, int *sparse_matrix_nonzeros, double *vect_out,
    int validation_phase, int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase6_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , double *valid_red_vals
#endif
    ) {
#pragma omp parallel if (!validation_phase) num_threads(num_threads) \
    shared(sparse_matrix_values, sparse_matrix_indeces, sparse_matrix_nonzeros,\
    vect_in, vect_out)
  {
#if ENABLE_BINDING
    /* Pin this thread to a fixed CPU, or to the next available one. */
    if (bind_to_cpu_set) {
      bind_to_cpu_w_reset(phase6_cpu_id, num_cpus, 0);
    } else {
#ifdef _OPENMP
      bind_to_available_cpu_w_reset(phase6_cpu_id, num_cpus, 0,
          omp_get_thread_num());
#endif
    }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Start per-thread hardware counters (PAPI calls serialized). */
#pragma omp critical
    {
      int retval;
      if ((retval = PAPI_start_counters(papi_info->event_code,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to start counters %d: %s\n", retval, handle_error(retval));
      }
    }
#endif
    for (int iter = 0; iter < num_iterations/5; ++iter) {
      double reduction_var;
#pragma omp for private(reduction_var)
      for (int i = 0 ; i < nrow; ++i) {
        /* Dot product of row i with the input vector. */
        reduction_var = 0.0;
        double * restrict values = sparse_matrix_values[i];
        int *restrict cols = sparse_matrix_indeces[i];
        const int nonzeros = sparse_matrix_nonzeros[i];
        for (int j = 0; j < nonzeros; ++j) {
          reduction_var += values[j] * vect_in[cols[j]];
        }
        vect_out[i] = reduction_var;
#if RED_VALIDATION
        /* Record running values for the reduced validation pass. */
        valid_red_vals[i] = reduction_var;
#endif
      }
    }
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Stop counters and report each event's count for this thread. */
#pragma omp critical
    {
      int retval;
      unsigned long long event_values[papi_info->total_events];
      memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
      if ((retval = PAPI_stop_counters(event_values,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
      }
      for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
        printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
        printf("Thread %d %s value = %lld\n", 0,
#endif
            papi_info->event_code_str[i],
            event_values[i]);
      }
    }
#endif
  }
}
/*
 * Phase 7: pointer-chasing kernel over per-thread linked lists.
 * Each thread repeatedly walks its own list (llist[thread]) starting from a
 * private scratch copy of the head node; the work is loads and branches
 * only.  Under RED_VALIDATION the value of the final node of the final
 * iteration is recorded.  Serial when validation_phase != 0.
 */
void phase7_compute(const int num_iterations, const int array_size,
    linked_list **llist,
    int validation_phase, int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase7_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , double *valid_red_reduction_var
#endif
    ) {
#pragma omp parallel firstprivate(llist) \
    if (!validation_phase) num_threads(num_threads)
  {
#ifdef _OPENMP
    linked_list *start_node = llist[omp_get_thread_num()];
#else
    linked_list *start_node = llist[0];
#endif
    /* BUG FIX: was malloc(sizeof(linked_list*)) — the size of a POINTER,
     * not of a node — so writing ->value/->next below overran the heap
     * allocation.  Allocate a full node, and free it before leaving the
     * parallel region (it previously leaked once per thread). */
    linked_list *orig_cur_node = malloc(sizeof(*orig_cur_node));
    linked_list *cur_node;
#if ENABLE_BINDING
    /* Pin this thread to a fixed CPU, or to the next available one. */
    if (bind_to_cpu_set) {
      bind_to_cpu_w_reset(phase7_cpu_id, num_cpus, 0);
    } else {
#ifdef _OPENMP
      bind_to_available_cpu_w_reset(phase7_cpu_id, num_cpus, 0,
          omp_get_thread_num());
#endif
    }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Start per-thread hardware counters (PAPI calls serialized). */
#pragma omp critical
    {
      int retval;
      if ((retval = PAPI_start_counters(papi_info->event_code,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to start counters %d: %s\n", retval, handle_error(retval));
      }
    }
#endif
    for (int iter = 0; iter < num_iterations; ++iter) {
      /* Re-seed the scratch node from the list head, then chase to the end. */
      cur_node = orig_cur_node;
      cur_node->value = start_node->value;
      cur_node->next = start_node->next;
      while (cur_node != NULL) {
#if RED_VALIDATION
        if ((iter >= num_iterations - 1) && (cur_node->next == NULL)) {
          *valid_red_reduction_var = cur_node->value;
        }
#endif
        cur_node = cur_node->next;
      }
    }
    free(orig_cur_node);  /* release the per-thread scratch node */
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Stop counters and report each event's count for this thread. */
#pragma omp critical
    {
      int retval;
      unsigned long long event_values[papi_info->total_events];
      memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
      if ((retval = PAPI_stop_counters(event_values,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
      }
      for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
        printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
        printf("Thread %d %s value = %lld\n", 0,
#endif
            papi_info->event_code_str[i],
            event_values[i]);
      }
    }
#endif
  }
}
/*
 * Phase 8: 3D particle force kernel.
 * For each consecutive particle pair (i, i+1), computes the squared
 * Euclidean distance r and stores the Coulomb force k*q_i*q_{i+1}/d^2
 * (r already holds the squared distance).  k is Coulomb's constant,
 * 8.987551e9.  Serial when validation_phase != 0.
 */
void phase8_compute(const int num_iterations, const int num_particles,
    particle* restrict particles, double* restrict forces,
    int validation_phase, int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase8_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , double *valid_red_vals
#endif
    ) {
  /*
   * This phase does a 3D distance calculation between particles and calculates
   * the electrostatic force between pairs of points.
   */
#pragma omp parallel shared(forces) firstprivate(particles) \
    if(!validation_phase) num_threads(num_threads)
  {
    double k = 8.987551 * 1000000000;  /* Coulomb's constant (N·m²/C²) */
#if ENABLE_BINDING
    /* Pin this thread to a fixed CPU, or to the next available one. */
    if (bind_to_cpu_set) {
      bind_to_cpu_w_reset(phase8_cpu_id, num_cpus, 0);
    } else {
#ifdef _OPENMP
      bind_to_available_cpu_w_reset(phase8_cpu_id, num_cpus, 0,
          omp_get_thread_num());
#endif
    }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Start per-thread hardware counters (PAPI calls serialized). */
#pragma omp critical
    {
      int retval;
      if ((retval = PAPI_start_counters(papi_info->event_code,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to start counters %d: %s\n", retval, handle_error(retval));
      }
    }
#endif
    for (int iter = 0; iter < num_iterations; ++iter) {
#pragma omp for simd
      for (int i = 0; i < num_particles-1; ++i) {
        /* r = squared distance between particle i and i+1 */
        double r = (particles[i+1].x - particles[i].x) *
            (particles[i+1].x - particles[i].x) +
            (particles[i+1].y - particles[i].y) *
            (particles[i+1].y - particles[i].y) +
            (particles[i+1].z - particles[i].z) *
            (particles[i+1].z - particles[i].z);
        forces[i] = (k * particles[i].charge * particles[i+1].charge) / r;
#if RED_VALIDATION
        /* Record running values for the reduced validation pass. */
        valid_red_vals[i] = forces[i];
#endif
      }
    }
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
/* Stop counters and report each event's count for this thread. */
#pragma omp critical
    {
      int retval;
      unsigned long long event_values[papi_info->total_events];
      memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
      if ((retval = PAPI_stop_counters(event_values,
          papi_info->total_events)) != PAPI_OK) {
        printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
      }
      for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
        printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
        printf("Thread %d %s value = %lld\n", 0,
#endif
            papi_info->event_code_str[i],
            event_values[i]);
      }
    }
#endif
  }
}
/*
 * Computes the first num_entries palindromic numbers into palindromes[],
 * repeating the whole search num_iterations/10 times.
 *
 * Each thread brute-forces upward from either the globally latest known
 * palindrome (latest_pal/latest_i, shared) or from zero when no usable
 * earlier result exists. total_palindromes and from_zero are per-thread
 * statistics (firstprivate) reported in the PAPI section.
 *
 * FIX: the two statistics printfs at the end called omp_get_thread_num()
 * without an #ifdef _OPENMP guard, unlike every other call site in this
 * file; that broke non-OpenMP builds with ENABLE_PAPI set. They are now
 * guarded the same way as the event-value printfs above them.
 */
void phase9_compute(const int num_iterations, const int num_entries,
    unsigned long* restrict palindromes, int validation_phase, int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase9_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , unsigned long* restrict valid_red_ulong_vals
#endif
    ) {
    /*
     * Computes the first N palindromes
     */
    unsigned int is_palindrome = 0;
    unsigned long num = 0;
    int found = 0;
    unsigned long latest_pal = 0;   /* largest palindrome published so far (shared) */
    unsigned int latest_i = 0;      /* index that produced latest_pal (shared) */
    int total_palindromes = 0;      /* per-thread work counter (firstprivate) */
    int from_zero = 0;              /* per-thread count of from-scratch restarts */
#pragma omp parallel firstprivate(palindromes, total_palindromes, from_zero) private(found, num, \
    is_palindrome) \
    if(!validation_phase) num_threads(num_threads) shared(latest_pal, latest_i)
    {
#if ENABLE_BINDING
        if (bind_to_cpu_set) {
            bind_to_cpu_w_reset(phase9_cpu_id, num_cpus, 0);
        } else {
#ifdef _OPENMP
            bind_to_available_cpu_w_reset(phase9_cpu_id, num_cpus, 0,
                omp_get_thread_num());
#endif
        }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
#pragma omp critical
        {
            int retval;
            if ((retval = PAPI_start_counters(papi_info->event_code,
                papi_info->total_events)) != PAPI_OK) {
                printf("Failed to start counters %d: %s\n", retval,
                    handle_error(retval));
            }
        }
#endif
        for (int iter = 0; iter < num_iterations/10; ++iter) {
            num = 0;
            latest_pal = 0;
            latest_i = 0;
#pragma omp for
            for (int i = 0; i < num_entries; ++i) {
                if (i == 0) {
                    /* the first palindrome is 0 itself */
                    palindromes[i] = num;
#if RED_VALIDATION
                    valid_red_ulong_vals[i] = palindromes[i];
#endif
                } else {
                    if (i > latest_i) {
                        /* resume counting upward from the latest known result */
                        found = latest_i;
                        num = palindromes[found] + 1;
                    } else {
                        /* no usable earlier result: search from scratch */
                        ++from_zero;
                        found = -1;
                        num = 0;
                    }
                    /* advance num until the i-th palindrome has been found */
                    while (found < i) {
                        if (num < 10) {
                            /* single digits are trivially palindromic */
                            palindromes[i] = num;
                            is_palindrome = 1;
                        } else {
                            is_palindrome = 1;
                            /* count decimal digits of num */
                            unsigned long tmp_num = num;
                            unsigned int length = 0;
                            while (tmp_num) {
                                length += 1;
                                tmp_num /= 10;
                            }
                            /* compare lowest vs. highest remaining digit,
                             * peeling both off each pass */
                            tmp_num = num;
                            while (tmp_num) {
                                if ((tmp_num % 10) !=
                                    (tmp_num / (unsigned long) pow(10, (length-1)))) {
                                    is_palindrome = 0;
                                    break;
                                }
                                tmp_num %= (unsigned long) pow(10, (length-1));
                                tmp_num /= 10;
                                length -= 2;
                            }
                        }
                        if (is_palindrome) {
                            ++found;
                        }
                        ++num;
                    }
                    palindromes[i] = num-1;
                    ++total_palindromes;
                    /* publish progress so later indices can resume from here */
                    if ((i > latest_i) && (palindromes[i] > latest_pal)) {
                        latest_pal = palindromes[i];
                        latest_i = i;
                    }
#if RED_VALIDATION
                    valid_red_ulong_vals[i] = palindromes[i];
#endif
                }
            }
        }
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
#pragma omp critical
        {
            int retval;
            unsigned long long event_values[papi_info->total_events];
            memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
            if ((retval = PAPI_stop_counters(event_values,
                papi_info->total_events)) != PAPI_OK) {
                printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
            }
            for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
                printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
                printf("Thread %d %s value = %lld\n", 0,
#endif
                    papi_info->event_code_str[i],
                    event_values[i]);
            }
            /* FIX: guard omp_get_thread_num() like the printfs above */
#ifdef _OPENMP
            printf("Thread %d total_palindromes value = %d\n", omp_get_thread_num(), total_palindromes);
            printf("Thread %d from_zero value = %d\n", omp_get_thread_num(), from_zero);
#else
            printf("Thread %d total_palindromes value = %d\n", 0, total_palindromes);
            printf("Thread %d from_zero value = %d\n", 0, from_zero);
#endif
        }
#endif
    }
}
/*
 * GUPS-like kernel: each thread scatters index values into randomloc[]
 * at rand_r-generated positions for num_iterations * num_randomloc total
 * updates (distributed across threads by the omp for).
 *
 * FIX: the loop bound was computed as (int)num_iterations * (int)num_randomloc
 * and only then compared with the unsigned long counter; the int*int product
 * can overflow for realistic sizes. The multiplication is now widened first.
 */
void phase10_compute(const int num_iterations, int num_randomloc,
    int *randomloc, int validation_phase,
    int num_threads
#if ENABLE_BINDING
    , int num_cpus, int phase10_cpu_id, int bind_to_cpu_set
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
    , PAPI_info *papi_info
#endif
#if RED_VALIDATION
    , unsigned int* restrict valid_red_int_vals
#endif
    ) {
    /*
     * GUPS-like kernel
     */
    int index;
    unsigned int seed;
#pragma omp parallel firstprivate(randomloc, num_randomloc) \
    private(index, seed)\
    if(!validation_phase) num_threads(num_threads)
    {
#if ENABLE_BINDING
        if (bind_to_cpu_set) {
            bind_to_cpu_w_reset(phase10_cpu_id, num_cpus, 0);
        } else {
#ifdef _OPENMP
            bind_to_available_cpu_w_reset(phase10_cpu_id, num_cpus, 0,
                omp_get_thread_num());
#endif
        }
#endif
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
#pragma omp critical
        {
            int retval;
            if ((retval = PAPI_start_counters(papi_info->event_code,
                papi_info->total_events)) != PAPI_OK) {
                printf("Failed to start counters %d: %s\n", retval,
                    handle_error(retval));
            }
        }
#endif
        /* per-thread PRNG stream: thread id under OpenMP, rand() otherwise */
#ifdef _OPENMP
        seed = omp_get_thread_num();
#else
        seed = rand();
#endif
        /* FIX: widen before multiplying to avoid int overflow in the bound */
#pragma omp for
        for (unsigned long iter = 0; iter < (unsigned long) num_iterations * num_randomloc; ++iter) {
            index = rand_r(&seed) % num_randomloc;
            randomloc[index] = index;
        }
#if ENABLE_PAPI && !(RED_VALIDATION || FULL_VALIDATION)
#pragma omp critical
        {
            int retval;
            unsigned long long event_values[papi_info->total_events];
            memset(event_values, 0, papi_info->total_events * sizeof(unsigned long long));
            if ((retval = PAPI_stop_counters(event_values,
                papi_info->total_events)) != PAPI_OK) {
                printf("Failed to stop counters %d: %s\n", retval, handle_error(retval));
            }
            for (int i = 0; i < papi_info->total_events; ++i) {
#ifdef _OPENMP
                printf("Thread %d %s value = %lld\n", omp_get_thread_num(),
#else
                printf("Thread %d %s value = %lld\n", 0,
#endif
                    papi_info->event_code_str[i],
                    event_values[i]);
            }
        }
#endif
    }
}
|
GB_binop__first_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__first_int64
// A.*B function (eWiseMult): GB_AemultB__first_int64
// A*D function (colscale): GB_AxD__first_int64
// D*A function (rowscale): GB_DxB__first_int64
// C+=B function (dense accum): GB_Cdense_accumB__first_int64
// C+=b function (dense accum): GB_Cdense_accumb__first_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_int64
// C=scalar+B GB_bind1st__first_int64
// C=scalar+B' GB_bind1st_tran__first_int64
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = aij
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = x ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT64 || GxB_NO_FIRST_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense: expands the shared ewise3 template
// with this file's FIRST/int64 macros (so each cij = aij).
GrB_Info GB_Cdense_ewise3_noaccum__first_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_DISABLE; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse accumulated into dense). The template expansion is
// intentionally disabled (#if 0) for the FIRST operator, so this
// function is a no-op that reports success.
GrB_Info GB_Cdense_accumB__first_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulated into dense). Disabled (#if 0) for the FIRST
// operator: the function is a no-op that reports success.
GrB_Info GB_Cdense_accumb__first_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by diagonal D, via the colscale template with
// the FIRST/int64 macros in effect.
GrB_Info GB_AxD__first_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by diagonal D, via the rowscale template with
// the FIRST/int64 macros in effect.
GrB_Info GB_DxB__first_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M), expanding GB_add_template.c
// with the FIRST/int64 macros in effect.
GrB_Info GB_AaddB__first_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked by M), expanding GB_emult_template.c
// with the FIRST/int64 macros in effect.
GrB_Info GB_AemultB__first_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = first(x, Bx): since FIRST(x,bij) == x, every Cx[p] is set to the
// scalar x and Bx is never read (the empty "; ;" below is the vacated
// GB_GETB expansion).
GrB_Info GB_bind1st__first_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
// C = first(x, A'): transpose A while applying the bound-first scalar;
// GB_CAST_OP (redefined just above) writes x into every Cx entry.
GrB_Info GB_bind1st_tran__first_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore the file-wide GB_ATYPE definition for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_binop__cmplx_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__cmplx_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__cmplx_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__cmplx_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__cmplx_fp32)
// C=scalar+B GB (_bind1st__cmplx_fp32)
// C=scalar+B' GB (_bind1st_tran__cmplx_fp32)
// C=A+scalar GB (_bind2nd__cmplx_fp32)
// C=A'+scalar GB (_bind2nd_tran__cmplx_fp32)
// C type: GxB_FC32_t
// A type: float
// B,b type: float
// BinaryOp: cij = GxB_CMPLXF (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GxB_CMPLXF (GBX (Ax, pA, A_iso), 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GxB_CMPLXF (GBX (Bx, pB, B_iso), 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GxB_CMPLXF (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CMPLX || GxB_NO_FP32 || GxB_NO_CMPLX_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense, expanding the shared ewise3
// template with the CMPLX/fp32 macros (cij = GxB_CMPLXF(aij, bij)).
GrB_Info GB (_Cdense_ewise3_noaccum__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_DISABLE; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse into dense). The template expansion is intentionally
// disabled (#if 0) for this operator; the function is a no-op reporting success.
GrB_Info GB (_Cdense_accumB__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar into dense). Disabled (#if 0) for this operator; the
// function is a no-op reporting success.
GrB_Info GB (_Cdense_accumb__cmplx_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (masked variants included), expanding GB_add_template.c.
// The GB_WERK_DECLARE slicing workspaces are allocated by the template and
// released by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__cmplx_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C = A.*B where C is sparse/hypersparse, via GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__cmplx_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B, A sparse/hyper and B bitmap/full. Because
// GB_BINOP_FLIP is 1 for this operator, the flipxy flag selects between
// the flipped and unflipped expansions of the same template.
GrB_Info GB (_AemultB_02__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B with M sparse/hyper and A, B bitmap/full,
// via GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is bitmap, via GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__cmplx_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = cmplx(x, Bx): bind the scalar x as the real part and each bij as
// the imaginary part. Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__cmplx_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = GxB_CMPLXF (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = cmplx(Ax, y): each aij becomes the real part, the bound scalar y
// the imaginary part. Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__cmplx_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = GxB_CMPLXF (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = GxB_CMPLXF (x, aij) ; \
}
// C = cmplx(x, A'): transpose A while applying the bound-first scalar;
// GB_CAST_OP (redefined just above) performs GxB_CMPLXF(x, aij).
GrB_Info GB (_bind1st_tran__cmplx_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore the file-wide GB_ATYPE definition for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = GxB_CMPLXF (aij, y) ; \
}
// C = cmplx(A', y): transpose A while applying the bound-second scalar;
// GB_CAST_OP (redefined just above) performs GxB_CMPLXF(aij, y).
GrB_Info GB (_bind2nd_tran__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ikj_optimize.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int A_row;
int A_col;
int B_row;
int B_col;
/*
 * Allocates a row x col matrix as an array of row pointers.
 * Rows are allocated with calloc so every element starts at 0: the result
 * matrix C in main() is accumulated into with += without any explicit
 * zeroing, which previously read indeterminate malloc'd memory.
 * The returned matrix is released with freeMatrix().
 */
int **constructMatrix(int row, int col){
    int **matrix = (int **)malloc(sizeof(int *) * row);
    for (int i = 0; i < row;i++){
        matrix[i] = (int *)calloc(col, sizeof(int));
    }
    return matrix;
}
/*
 * Releases a matrix created by constructMatrix: frees every row buffer,
 * then the row-pointer array itself. The col argument is not needed for
 * deallocation; it is kept so the signature mirrors constructMatrix.
 */
void freeMatrix(int **matrix, int row, int col){
    for (int r = 0; r < row; r++)
        free(matrix[r]);
    free(matrix);
}
// Reads integer matrices A (A_row x A_col) and B (B_row x B_col) from the
// file "matrix", multiplies them with an OpenMP-parallel ikj-ordered loop,
// writes the product to "ikj_optimize_result", and prints the elapsed time.
// Usage: ikj_optimize A_row A_col B_row B_col num_threads
int main(int argc, char *argv[]){
// NOTE(review): argc is never validated; fewer than 5 arguments crashes in atoi.
A_row = atoi(*(argv + 1));
A_col = atoi(*(argv + 2));
B_row = atoi(*(argv + 3));
B_col = atoi(*(argv + 4));
int number_of_threads = atoi(*(argv + 5));
// NOTE(review): fopen result is unchecked; a missing "matrix" file makes
// the fscanf calls below dereference NULL.
FILE *input = fopen("matrix", "r");
int **A = constructMatrix(A_row, A_col);
int **B = constructMatrix(B_row, B_col);
// NOTE(review): C is accumulated into with += below, but its malloc'd
// storage is never zeroed here — it should be explicitly zero-initialized.
int **C = constructMatrix(A_row, B_col);
//read A
for (int i = 0; i < A_row;i++){
for (int j = 0; j < A_col;j++){
fscanf(input, "%d", &A[i][j]);
}
}
//read B
for (int i = 0; i < B_row;i++){
for (int j = 0; j < B_col;j++){
fscanf(input, "%d", &B[i][j]);
}
}
fclose(input);
double start_time = omp_get_wtime();
//multiply:
int i, j, k;
int temp;
// ikj order: A[i][k] is hoisted into temp so the inner j loop streams
// row k of B and row i of C sequentially (cache-friendly).
#pragma omp parallel for shared(A,B,C) private(i,j,k,temp) num_threads(number_of_threads)
for (i = 0; i < A_row;i++){
for (k = 0; k < B_row;k++){
temp = A[i][k];
for (j = 0; j < B_col;j++){
C[i][j] += temp * B[k][j];
}
}
}
double end_time = omp_get_wtime();
printf("%s: %g sec.\n", "ikj_optimize_runtime", end_time - start_time);
//output the result to compare with golden result
FILE *out = fopen("ikj_optimize_result", "w");
for (int i = 0; i < A_row;i++){
for (int j = 0; j < B_col;j++){
fprintf(out, "%d ", C[i][j]);
}
fprintf(out, "\n");
}
fprintf(out, "\n");
fclose(out);
freeMatrix(A, A_row, A_col);
freeMatrix(B, B_row, B_col);
freeMatrix(C, A_row, B_col);
return 0;
} |
neuron.h | /*
* Architektury výpočetních systémů (AVS 2019)
* Projekt c. 1 (ANN)
* Login: xstupi00
*/
/**
* @brief Returns the output of the neuron computed from its inputs, weights and bias
* @param inputSize - number of inputs the neuron
* @param input - pointer to neuron input array (identical for all neurons in the layer)
* @param weight - pointer to this neuron's weights within the layer's weight array
* @param bias - bias value of the neuron
* @return Output of the neuron
*/
// #pragma omp declare simd uniform(inputSize, input) linear(weight:512) simdlen(8) notinbranch
// #pragma omp declare simd uniform(inputSize, input) linear(weight:784) simdlen(8) notinbranch
#pragma omp declare simd uniform(inputSize, input) linear(weight) simdlen(8) notinbranch
float evalNeuron(
size_t inputSize,
const float* input,
const float* weight,
float bias
);
|
DMD5_fmt_plug.c | /*
* DMD5_fmt.c
*
* DIGEST-MD5 authentication module for Solar Designer's John the Ripper
* Uses Solar Designer's MD5 implementation.
*
* This software is Copyright 2006, regenrecht@o2.pl, and
* Copyright 2011, 2013 magnum, and it is hereby released to the general
* public under the following terms: Redistribution and use in source and
* binary forms, with or without modification, are permitted.
*
* Input format:
* $DIGEST-MD5$ username $ realm $ nonce $ digest_uri $ cnonce $ nc $ qop $ response [ $ authzid ]
*
* Just base64-decode the blob you see when sniffing, to get all data needed
* for above.
*
* See https://tools.ietf.org/html/rfc2831 (Using Digest Authentication as a
* SASL Mechanism) for algorithm details.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_DMD5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_DMD5);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "md5.h"
#include "common.h"
#include "formats.h"
#include "memdbg.h"
#define FORMAT_LABEL "dmd5"
#define FORMAT_NAME "DIGEST-MD5 C/R"
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define FORMAT_TAG "$DIGEST-MD5$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MD5_HEX_SIZE (2 * BINARY_SIZE)
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define DSIZE (128 - sizeof(int))
#define CIPHERTEXT_LENGTH (DSIZE * 4)
#define PLAINTEXT_LENGTH 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static const char itoa16_shr_04[] =
"0000000000000000"
"1111111111111111"
"2222222222222222"
"3333333333333333"
"4444444444444444"
"5555555555555555"
"6666666666666666"
"7777777777777777"
"8888888888888888"
"9999999999999999"
"aaaaaaaaaaaaaaaa"
"bbbbbbbbbbbbbbbb"
"cccccccccccccccc"
"dddddddddddddddd"
"eeeeeeeeeeeeeeee"
"ffffffffffffffff";
static const char itoa16_and_0f[] =
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef";
static struct custom_salt {
unsigned char login_id[DSIZE]; // username:realm
unsigned int login_id_len;
unsigned char nonces[DSIZE]; // :nonce:cnonce[:authzid]
unsigned int nonces_len;
unsigned char prehash_KD[DSIZE]; // :nonce:nc:cnonce:qop:hex_A2_hash
unsigned int prehash_KD_len;
} *cur_salt;
static uint32_t (*crypt_key)[BINARY_SIZE/4];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static struct fmt_tests tests[] = {
{"$DIGEST-MD5$s3443$pjwstk$00$ldap/10.253.34.43$0734d94ad9abd5bd7fc5e7e77bcf49a8$00000001$auth-int$dd98347e6da3efd6c4ff2263a729ef77", "test"},
// Two hashes from https://tools.ietf.org/html/rfc2831#section-8
{"$DIGEST-MD5$chris$elwood.innosoft.com$OA6MG9tEQGm2hh$imap/elwood.innosoft.com$OA6MHXh6VqTrRk$00000001$auth$d388dad90d4bbd760a152321f2143af7", "secret"},
{"$DIGEST-MD5$chris$elwood.innosoft.com$OA9BSXrbuRhWay$acap/elwood.innosoft.com$OA9BSuZWMSpW8m$00000001$auth$6084c6db3fede7352c551284490fd0fc", "secret"},
{NULL}
};
/*
 * One-time format initialization: size and allocate the per-candidate
 * buffers. With OpenMP, min_keys_per_crypt is scaled by the thread count
 * and max_keys_per_crypt additionally by OMP_SCALE so each thread gets a
 * big enough batch of candidates per crypt_all() call.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* zero-filled: saved_key entries are valid C strings from the start */
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			PLAINTEXT_LENGTH + 1);
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
			BINARY_SIZE);
}
/* Release the buffers allocated in init(); MEM_FREE also NULLs the
 * pointers, so calling this twice is harmless. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
}
/*
 * Sanity-check a candidate ciphertext line:
 *   $DIGEST-MD5$ user $ realm $ nonce $ digest_uri $ cnonce $ nc $ qop $
 *   response [ $ authzid ]
 * Returns 1 if the line looks parseable, 0 otherwise. Field length limits
 * mirror the buffer sizes used in get_salt().
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *data = ciphertext + FORMAT_TAG_LEN;
	int extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	if (strlen(ciphertext) > CIPHERTEXT_LENGTH)
		return 0;
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 64) // username
		return 0;
	data = p + 1; // realm
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
		return 0;
	data = p + 1; // nonce
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
		return 0;
	data = p + 1; // digest_uri
	if (!(p = strchr(data, '$')) || (int)(p-data) >= DSIZE)
		return 0;
	data = p + 1; // cnonce
	if (!(p = strchr(data, '$')) || (int)(p-data) > MD5_HEX_SIZE)
		return 0;
/* if (hexlenl(data, 0) != p-data) // this is not always hex data!
		return 0; */
	data = p + 1; // nc
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
		return 0;
	data = p + 1; // qop
	/* NOTE: prefix comparison only — e.g. "authfoo" also passes here; the
	 * exact strcmp() checks happen later in get_salt(). */
	if (strncmp(data, "auth", 4) && strncmp(data, "auth-int", 8) &&
	    strncmp(data, "auth-conf", 9))
		return 0;
	if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
		return 0;
	data = p + 1; // authzid, optional
	if ((p = strchr(data, '$'))) {
		if ((int)(p-data) > MD5_HEX_SIZE || strlen(&p[1]) >= 8)
			return 0;
	} else if (strlen(data) > MD5_HEX_SIZE)
		return 0;
	/* NOTE(review): when an authzid is present, `data` still points at
	 * "response$authzid" here; if hexlenl() reports the trailing
	 * "$authzid" through `extra`, such hashes are rejected — confirm
	 * hexlenl()'s semantics against misc.h. */
	if (hexlenl(data, &extra) != MD5_HEX_SIZE || extra)
		return 0;
	return 1;
}
/*
 * Decode the 32-hex-digit response field (field 8) into 16 raw bytes.
 * valid() has guaranteed that all eight '$' separators are present.
 * Returns a pointer to a static buffer, per the usual JtR convention.
 */
static void *get_binary(char *ciphertext)
{
	static uint32_t out[BINARY_SIZE/4];
	char response[MD5_HEX_SIZE + 1];
	unsigned int i;
	int field;
	char *p = NULL, *data = ciphertext + FORMAT_TAG_LEN;

	/* skip the seven fields that precede the response */
	for (field = 0; field < 7; field++) {
		p = strchr(data, '$');
		data = p + 1;
	}
	/* an eighth '$' is only present when an authzid follows */
	p = strchr(data, '$');
	if (p && (p - data + 1) < sizeof(response))
		strnzcpy(response, data, p - data + 1);
	else
		strnzcpy(response, data, sizeof(response));

	/* hex -> binary, two digits per output byte */
	for (i = 0; i < BINARY_SIZE; ++i)
		((unsigned char*)out)[i] =
			(atoi16[ARCH_INDEX(response[i*2])] << 4)
			+ atoi16[ARCH_INDEX(response[i*2+1])];
	return (void*)out;
}
/*
 * Parse a $DIGEST-MD5$ ciphertext into a custom_salt. The three MD5 input
 * fragments that do NOT depend on the password (login_id, nonces,
 * prehash_KD — the latter includes hex(MD5(A2)) per RFC 2831) are
 * precomputed here so crypt_all() only hashes the password-dependent parts.
 * Returns a pointer to a static struct (standard JtR salt convention).
 *
 * Bug fix: A2 was hashed uninitialized when qop matched neither "auth" nor
 * "auth-int"/"auth-conf" exactly — reachable because valid() only checks an
 * "auth" prefix (e.g. "authfoo"). A2 now starts as the empty string so the
 * MD5 input is always well defined.
 */
static void *get_salt(char *ciphertext)
{
	char username[64];
	char realm[64];
	char nonce[64];
	char digest_uri[DSIZE];
	char cnonce[MD5_HEX_SIZE + 1];
	char nc[9];
	char qop[9];
	char authzid[8];
	unsigned char *ptr_src, *ptr_dst, v, i;
	char *ccopy = strdup(ciphertext);
	char *p, *data = ccopy + FORMAT_TAG_LEN;
	MD5_CTX ctx;
	char A2[DSIZE];
	unsigned char hash[BINARY_SIZE];
	unsigned char hex_hash[2*MD5_HEX_SIZE];
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));

	/* Split the '$'-separated fields in place on the working copy;
	 * valid() guarantees every mandatory field is present. */
	if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(username, data, sizeof(username));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(realm, data, sizeof(realm));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(nonce, data, sizeof(nonce));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(digest_uri, data, sizeof(digest_uri));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(cnonce, data, sizeof(cnonce));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(nc, data, sizeof(nc));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(qop, data, sizeof(qop));
	data = p + 1;
	if ((p = strchr(data, '$'))) {
		/* optional trailing authzid after the response field */
		*p = 0;
		data = p + 1;
		if (*data)
			strnzcpy(authzid, data, sizeof(authzid));
		else
			*authzid = 0;
	} else {
		*authzid = 0;
	}

	/* A2 per RFC 2831: integrity/confidentiality modes append 32 zeros */
	A2[0] = '\0';
	if (!strcmp(qop, "auth"))
		snprintf((char*)A2, sizeof(A2),
		         "AUTHENTICATE:%s", digest_uri);
	else if (!strcmp(qop, "auth-int") || !strcmp(qop, "auth-conf"))
		snprintf((char*)A2, sizeof(A2),
		         "AUTHENTICATE:%s:00000000000000000000000000000000",
		         digest_uri);

	/* hex(MD5(A2)) */
	MD5_Init(&ctx);
	MD5_Update(&ctx, A2, strlen((char*)A2));
	MD5_Final(hash, &ctx);
	ptr_src = hash;
	ptr_dst = hex_hash;
	for (i = 0; i < BINARY_SIZE; ++i) {
		v = *ptr_src++;
		*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
		*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
	}
	*ptr_dst = 0;

	/* password-independent MD5 input fragments used by crypt_all() */
	snprintf((char*)cs.prehash_KD, sizeof(cs.prehash_KD),
	         ":%s:%s:%s:%s:%s", nonce, nc, cnonce, qop, hex_hash);
	cs.prehash_KD_len = strlen((char*)cs.prehash_KD);
	if (authzid[0])
		snprintf((char*)cs.nonces, sizeof(cs.nonces),
		         ":%s:%s:%s", nonce, cnonce, authzid);
	else
		snprintf((char*)cs.nonces, sizeof(cs.nonces),
		         ":%s:%s", nonce, cnonce);
	cs.nonces_len = strlen((char*)cs.nonces);
	snprintf((char*)cs.login_id, sizeof(cs.login_id),
	         "%s:%s:", username, realm);
	cs.login_id_len = strlen((char*)cs.login_id);

	MEM_FREE(ccopy);
	return (void*)&cs;
}
/* Select the salt produced by get_salt() for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Store a candidate password (truncated to PLAINTEXT_LENGTH). */
static void set_key(char *key, int index)
{
	strnzcpyn(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

/* Return the candidate password previously stored at this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Compute the DIGEST-MD5 response for every queued candidate:
 *   A1   = MD5( MD5("user:realm:" . password) . ":nonce:cnonce[:authzid]" )
 *   resp = MD5( hex(A1) . ":nonce:nc:cnonce:qop:hex(MD5(A2))" )
 * where both salt fragments were precomputed by get_salt().
 * NOTE: without _OPENMP the for-header is compiled out and only index 0 is
 * processed; that build also keeps MAX_KEYS_PER_CRYPT at 1, so count == 1.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		unsigned char hash[16];
		unsigned char hex_hash[MD5_HEX_SIZE];
		unsigned char *ptr_src, *ptr_dst;
		MD5_CTX ctx;
		int i;
		MD5_Init(&ctx);
		// "username:realm"
		MD5_Update(&ctx, cur_salt->login_id, cur_salt->login_id_len);
		// "password"
		MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		MD5_Final(hash, &ctx);
		MD5_Init(&ctx);
		// previous result
		MD5_Update(&ctx, hash, BINARY_SIZE);
		// ":nonce:cnonce[:authzid]"
		MD5_Update(&ctx, cur_salt->nonces, cur_salt->nonces_len);
		MD5_Final(hash, &ctx);
		// hexify the A1 digest (lowercase, two chars per byte)
		ptr_src = hash;
		ptr_dst = hex_hash;
		for (i = 0; i < BINARY_SIZE; ++i) {
			unsigned char v = *ptr_src++;
			*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
			*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
		}
		MD5_Init(&ctx);
		// previous result, in hex
		MD5_Update(&ctx, hex_hash, MD5_HEX_SIZE);
		// ":nonce:nc:cnonce:qop:hex_A2_hash
		MD5_Update(&ctx, cur_salt->prehash_KD, cur_salt->prehash_KD_len);
		MD5_Final((unsigned char*)crypt_key[index], &ctx);
	}
	return count;
}
/* Quick scan: does any computed response share its first 32 bits with the
 * target binary? Full comparison is deferred to cmp_one(). */
static int cmp_all(void *binary, int count)
{
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
	const uint32_t first_word = ((uint32_t*)binary)[0];
	int i;

	for (i = 0; i < count; i++) {
		if (crypt_key[i][0] == first_word)
			return 1;
	}
	return 0;
#else
	/* single-candidate build: compare the one entry directly */
	return ((uint32_t*)binary)[0] == crypt_key[0][0];
#endif
}
/* Full 16-byte comparison against one computed response. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}

/* cmp_one() already compared the complete digest, so nothing more to do. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Hash-table helpers: successive low-bit masks of the digest's first word. */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
/* Format descriptor: wires the parameters and callbacks above into the
 * John the Ripper core (registered via john_register_one()). */
struct fmt_main fmt_DMD5 = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },      /* tunable cost names (none) */
		{ FORMAT_TAG },
		tests
	},
	{ /* fmt_methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },      /* tunable cost value callbacks (none) */
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,          /* no salt_compare */
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y as a struct timeval.
 * Returns 1 when the difference is negative, 0 otherwise.
 * Note: *y is normalized in place as a scratch value (GNU libc idiom).
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow seconds into y so that x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec) {
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * borrow;
		y->tv_sec += borrow;
	}
	/* Carry excess microseconds in y the other way. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}

	/* After normalization tv_usec is guaranteed non-negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-4 (25-point) 3D wave stencil: allocates two time
 * planes A[0]/A[1] plus the coefficient field roc2, runs the PLUTO/CLooG
 * time-tiled sweep TESTS times, and reports per-test and best timings.
 * Usage: prog Nx Ny Nz [Nt]
 */
int main(int argc, char *argv[])
{
	int t, i, j, k, test;
	int Nx, Ny, Nz, Nt;

	/*
	 * Grid size = requested interior + 8 (stencil radius 4, ghost layers
	 * on both sides of each axis).
	 * Bug fix: Nx/Ny/Nz/Nt were read uninitialized when too few command
	 * line arguments were supplied; provide small sane defaults instead.
	 */
	Nx = Ny = Nz = 16 + 8;
	Nt = 4;
	if (argc > 3) {
		Nx = atoi(argv[1])+8;
		Ny = atoi(argv[2])+8;
		Nz = atoi(argv[3])+8;
	}
	if (argc > 4)
		Nt = atoi(argv[4]);

	/*
	 * Bug fix: roc2 was first malloc'd with sizeof(double**) and then
	 * immediately overwritten by a second malloc, leaking the first
	 * allocation; it is now allocated once with the correct size.
	 */
	double ****A = (double ****) malloc(sizeof(double***)*2);
	double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
	A[0] = (double ***) malloc(sizeof(double**)*Nz);
	A[1] = (double ***) malloc(sizeof(double**)*Nz);
	for(i=0; i<Nz; i++){
		A[0][i] = (double**) malloc(sizeof(double*)*Ny);
		A[1][i] = (double**) malloc(sizeof(double*)*Ny);
		roc2[i] = (double**) malloc(sizeof(double*)*Ny);
		for(j=0;j<Ny;j++){
			A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
			A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
			roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
		}
	}

	/* Tile-size list consumed by the tiling pass, terminated by -1
	 * (allocated in one step instead of the generated malloc+realloc). */
	int *tile_size = (int*) malloc(sizeof(int)*5);
	tile_size[0] = 8;
	tile_size[1] = 8;
	tile_size[2] = 24;
	tile_size[3] = 64;
	tile_size[4] = -1;

	/* timekeeping */
	int ts_return = -1;
	struct timeval start, end, result;
	double tdiff = 0.0, min_tdiff=1.e100;

	/* deterministic pseudo-random initialization */
	const int BASE = 1024;
	srand(42);
	for (i = 1; i < Nz; i++) {
		for (j = 1; j < Ny; j++) {
			for (k = 1; k < Nx; k++) {
				A[0][i][j][k] = 1.0 * (rand() % BASE);
				roc2[i][j][k] = 2.0 * (rand() % BASE);
			}
		}
	}

#ifdef LIKWID_PERFMON
	LIKWID_MARKER_INIT;
#pragma omp parallel
	{
		LIKWID_MARKER_THREADINIT;
#pragma omp barrier
		LIKWID_MARKER_START("calc");
	}
#endif

	int num_threads = 1;
#if defined(_OPENMP)
	num_threads = omp_get_max_threads();
#endif

	/* stencil coefficients: center plus four neighbour shells */
	const double coef0 = -0.28472;
	const double coef1 = 0.16000;
	const double coef2 = -0.02000;
	const double coef3 = 0.00254;
	const double coef4 = -0.00018;

	for(test=0; test<TESTS; test++){
		gettimeofday(&start, 0);
		// serial execution - Addition: 6 && Multiplication: 2
		int t1, t2, t3, t4, t5, t6, t7, t8;
		int lb, ub, lbp, ubp, lb2, ub2;
		register int lbv, ubv;
		/* Start of CLooG code (auto-generated time-tiled loop nest; the
		 * bounds are machine-derived — do not hand-edit) */
		if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
			for (t1=-1;t1<=Nt-1;t1++) {
				lbp=ceild(t1+1,2);
				ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
				for (t2=lbp;t2<=ubp;t2++) {
					for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) {
						for (t4=max(max(ceild(t1-14,16),ceild(8*t2-Nz-51,64)),ceild(24*t3-Ny-51,64));t4<=min(min(floord(4*Nt+Nx-9,64),floord(4*t1+Nx-1,64)),floord(24*t3+Nx+11,64));t4++) {
							for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),16*t4+14);t5++) {
								for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
									for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
										lbv=max(64*t4,4*t5+4);
										ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
										for (t8=lbv;t8<=ubv;t8++) {
											A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
										}
									}
								}
							}
						}
					}
				}
			}
		}
		/* End of CLooG code */
		gettimeofday(&end, 0);
		ts_return = timeval_subtract(&result, &end, &start);
		tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
		min_tdiff = MIN(min_tdiff, tdiff);
		printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
	}
	PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
	{
		LIKWID_MARKER_STOP("calc");
	}
	LIKWID_MARKER_CLOSE;
#endif

	/* Free allocated arrays.
	 * Bug fix: the pointer tables A itself and tile_size were leaked. */
	for(i=0; i<Nz; i++){
		for(j=0;j<Ny;j++){
			free(A[0][i][j]);
			free(A[1][i][j]);
			free(roc2[i][j]);
		}
		free(A[0][i]);
		free(A[1][i]);
		free(roc2[i]);
	}
	free(A[0]);
	free(A[1]);
	free(A);
	free(roc2);
	free(tile_size);
	return 0;
}
|
jacobi7_save.c | #define max(a,b) (((a) < (b))? (b) : (a))
#define min(a,b) (((a) < (b))? (a) : (b))
#define _TH_1 2
#include <omp.h>
#define _NB_1 1
#define Index3D(_nx,_ny,_i,_j,_k) ((_i)+_nx*((_j)+_ny*(_k)))
/*
 * Tiled 7-point Jacobi-style sweep: for `timesteps` iterations, each
 * interior point of Anext receives the sum of its six axis neighbours in
 * A0 minus fac times the centre value, computed with OpenMP over z-blocks
 * and 32^3 cache tiles; then the A0/Anext roles are swapped.
 *
 * NOTE(review): fac is computed from A0[0] (6.0/(A0[0]*A0[0])), not from
 * the `alpha` parameter — looks like stale generated code; confirm intent.
 * NOTE(review): the A0/Anext swap is on local pointer copies only, so after
 * an odd number of timesteps the newest data sits in the buffer the caller
 * passed as Anext — verify callers expect this.
 * The B/ldb/ldc parameters are unused in this body.
 */
void jacobi7(const int nx,const int ny,int nz,const double alpha,double* A0,const int timesteps,const double* B,const int ldb,double* Anext,const int ldc) {
	double fac;
	double* temp_ptr;
	int i;int j;int k;int t;
	fac = 6.0/(A0[0]*A0[0]);
	/* block indices introduced by the tiling tool */
	int k_bk_1;
	int k_bk_2;
	int j_bk_3;
	int i_bk_4;
	/*@;BEGIN(Nest1=Nest)@*/for (t=0; t<timesteps; t+=1)
	{
		omp_set_num_threads(_TH_1);
#pragma omp parallel
		{
			/* z-blocks of height _NB_1 are distributed over threads */
			/*@;BEGIN(Nest2_group3=Nest)@*/#pragma omp for private(k,j,i,k_bk_1,k_bk_2,j_bk_3,i_bk_4)
			for (k_bk_1=1; k_bk_1<nz-1; k_bk_1+=_NB_1)
			{
				/*@;BEGIN(Nest2=Nest)@*/for (k_bk_2=0; k_bk_2<min(_NB_1,-k_bk_1+(-1+nz)); k_bk_2+=32)
				{
					/*@;BEGIN(Nest3=Nest)@*/for (j_bk_3=1; j_bk_3<-1+ny; j_bk_3+=32)
					{
						/*@;BEGIN(Nest4=Nest)@*/for (i_bk_4=1; i_bk_4<-1+nx; i_bk_4+=32)
						{
							/* intra-tile loops; the min() bounds clip
							 * partial tiles at the domain edges */
							for (k=0; k<min(32,min(_NB_1-k_bk_2,-k_bk_2+(-k_bk_1+(-1+nz)))); k+=1)
							{
								for (j=0; j<min(32,-j_bk_3+(-1+ny)); j+=1)
								{
									for (i=0; i<min(32,-i_bk_4+(-1+nx)); i+=1)
									{
										Anext[Index3D(nx,ny,i_bk_4+i,j_bk_3+j,k_bk_1+(k_bk_2+k))] = -(A0[Index3D(nx,ny,i_bk_4+i,j_bk_3+j,k_bk_1+(k_bk_2+k))]*fac)+(A0[Index3D(nx,ny,-1+(i_bk_4+i),j_bk_3+j,k_bk_1+(k_bk_2+k))]+(A0[Index3D(nx,ny,1+(i_bk_4+i),j_bk_3+j,k_bk_1+(k_bk_2+k))]+(A0[Index3D(nx,ny,i_bk_4+i,-1+(j_bk_3+j),k_bk_1+(k_bk_2+k))]+(A0[Index3D(nx,ny,i_bk_4+i,1+(j_bk_3+j),k_bk_1+(k_bk_2+k))]+(A0[Index3D(nx,ny,i_bk_4+i,j_bk_3+j,1+(k_bk_1+(k_bk_2+k)))]+A0[Index3D(nx,ny,i_bk_4+i,j_bk_3+j,-1+(k_bk_1+(k_bk_2+k)))])))));
									}
								}
							}
						}
					}
				}
			}
		}
		/* ping-pong the two buffers for the next timestep */
		temp_ptr = A0;
		A0 = Anext;
		Anext = temp_ptr;
	}
}
|
general_basis_get_vec.h | #ifndef _GENERAL_BASIS_GET_VEC_H
#define _GENERAL_BASIS_GET_VEC_H
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
// Accumulate out[i] += sign * c * in[i] over n_vec entries (complex-valued
// output). Always succeeds, hence the unconditional `true`.
template<class T>
bool inline update_out_dense(std::complex<double> c, int sign, npy_intp n_vec,const std::complex<T> *in, std::complex<T> *out){
	// the per-element coefficient is loop-invariant; form it once
	const std::complex<T> factor = T(sign) * std::complex<T>(c);
	for(npy_intp i=0;i<n_vec;i++){
		out[i] += factor * in[i];
	}
	return true;
}
// Accumulate out[i] += sign * Re(c) * in[i] over n_vec entries (real-valued
// output). Fails (returns false) when c carries a non-negligible imaginary
// part, since that cannot be represented in a real output vector.
template<class T>
bool inline update_out_dense(std::complex<double> c, int sign, npy_intp n_vec,const T *in, T *out){
	if(std::abs(c.imag())>1.1e-15){
		return false; // genuinely complex coefficient: caller needs the complex overload
	}
	T re = c.real();
	const T factor = T(sign) * re; // loop-invariant coefficient
	for(npy_intp i=0;i<n_vec;i++){
		out[i] += factor * in[i];
	}
	return true;
}
/*
 * Recursively scatter the contribution of representative state `s` into the
 * dense output vector(s): for each symmetry (depth 0..nt-1) with period
 * B->pers[depth], the state is mapped per times while the coefficient is
 * multiplied by the Bloch phase exp(-i*2*pi*qs[depth]/per) at each step.
 * The row for a state s is (Ns_full - s - 1) — states are stored in
 * descending order. Returns false if update_out_dense() rejects a complex
 * coefficient for a real-valued output.
 */
template<class I,class T>
bool get_vec_rep(general_basis_core<I> *B,
		I s,
		int &sign,
		const int nt,
		const npy_intp n_vec,
		const npy_intp Ns_full,
		const T in[],
		std::complex<double> c,
		T out[],
		const int depth)
{
	bool err = true;
	if(nt<=0){
		// no symmetries at all: single dense row receives the coefficient
		const npy_intp full = (Ns_full - s - 1)*n_vec;
		err = update_out_dense(c,sign,n_vec,in,&out[full]);
		return err;
	}
	int per = B->pers[depth];
	// Bloch phase increment for this symmetry sector
	double q = (2.0*M_PI*B->qs[depth])/per;
	std::complex<double> cc = std::exp(std::complex<double>(0,-q));
	if(depth < nt-1){
		// inner symmetries remain: recurse once per group element
		for(int j=0;j<per && err;j++){
			err = get_vec_rep(B,s,sign,nt,n_vec,Ns_full,in,c,out,depth+1);
			c *= cc;
			s = B->map_state(s,depth,sign); // advance to the next image of s
		}
		return err;
	}
	else{
		// deepest symmetry: write each image of s directly
		for(int j=0;j<per && err;j++){
			const npy_intp full = (Ns_full - s - 1)*n_vec;
			err = update_out_dense(c,sign,n_vec,in,&out[full]);
			c *= cc;
			s = B->map_state(s,depth,sign);
		}
		return err;
	}
}
/*
 * Expand `Ns` symmetry-reduced basis vectors (columns of `in`, n_vec wide)
 * into the full Hilbert space (`out`, Ns_full rows). Each representative
 * state basis[k] is scattered with normalization 1/sqrt(n[k]*prod(pers))
 * via get_vec_rep(). Returns false if any state required a complex
 * coefficient with a real-valued output type.
 */
template<class I,class J,class T>
bool get_vec_general_dense(general_basis_core<I> *B,
		const I basis[],
		const J n[],
		const npy_intp n_vec,
		const npy_intp Ns,
		const npy_intp Ns_full,
		const T in[],
		T out[])
{
	bool err = true;
	const int nt = B->get_nt();
	double norm = 1.0;
	for(int i=0;i<nt;i++){
		norm *= B->pers[i];
	}
	// Bug fix: `err` used to be a shared flag read outside and written
	// inside an `omp critical` section — an unsynchronized read/write data
	// race. A logical-AND reduction gives every thread a private flag and
	// combines them deterministically at the end; the early `continue`
	// still short-circuits work within a thread once it has failed.
	#pragma omp parallel for schedule(dynamic) firstprivate(norm) reduction(&&:err)
	for(npy_intp k=0;k<Ns;k++){
		if(!err)
			continue;
		std::complex<double> c = 1.0/std::sqrt(n[k]*norm);
		int sign = 1;
		err = get_vec_rep(B,basis[k],sign,nt,n_vec,Ns_full,&in[k*n_vec],c,out,0);
	}
	return err;
}
#endif
|
GB_unaryop__abs_int8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_int8
// op(A') function: GB_tran__abs_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int8_int8     // Cx [p] = GB_IABS (Ax [p]), p = 0..anz-1
(
	int8_t *restrict Cx,
	const int8_t *restrict Ax,
	int64_t anz,                    // number of entries to process
	int nthreads
)
{
#if GB_DISABLE
	return (GrB_NO_VALUE) ;         // operator compiled out via GB_control.h
#else
	#pragma omp parallel for num_threads(nthreads) schedule(static)
	for (int64_t p = 0 ; p < anz ; p++)
	{
		GB_CAST_OP (p, p) ;         // expands to: Cx [p] = GB_IABS ((int8_t) Ax [p])
	}
	return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int8_int8     // C = abs (A'), typecast int8 -> int8
(
	GrB_Matrix C,
	const GrB_Matrix A,
	int64_t *restrict *Rowcounts,   // per-slice row counts from phase 1
	GBI_single_iterator Iter,
	const int64_t *restrict A_slice,
	int naslice                     // number of slices of A
)
{
#if GB_DISABLE
	return (GrB_NO_VALUE) ;         // operator compiled out via GB_control.h
#else
	// the shared transpose template is specialized by the GB_* macros above
	#define GB_PHASE_2_OF_2
	#include "GB_unaryop_transpose.c"
	return (GrB_SUCCESS) ;
#endif
}
#endif
|
rose_reduction_2.c | /* A kernel for two level parallelizable loop with reduction */
#include "omp.h"
float u[100][100];

/*
 * Return the sum of squares of all elements of the global matrix u,
 * computed with (nested) OpenMP parallel-for reductions.
 *
 * Bug fix: `error` was never initialized before being used as a reduction
 * variable. An OpenMP `reduction(+:...)` folds the variable's original
 * value into the combined result, so the function previously returned
 * garbage plus the sum. It now starts from zero.
 */
float foo()
{
	int i;
	int j;
	float temp;
	float error = 0.0f;

#pragma omp parallel for private (temp,i,j) reduction (+:error)
	for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (temp,j) reduction (+:error)
		for (j = 0; j <= 99; j += 1) {
			temp = u[i][j];
			error = error + temp * temp;
		}
	}
	return error;
}
|
km_config.h | /*
*
* Header file of the KM-config algorithm (C++ version)
*
*
* An algorithm for finding multiple core-periphery pairs in networks
*
*
* Core-periphery structure requires something else in the network
* Sadamori Kojaku and Naoki Masuda
* Preprint arXiv:1710.07076
*
*
* Please do not distribute without contacting the authors.
*
*
* AUTHOR - Sadamori Kojaku
*
*
* DATE - 11 Oct 2017
*/
#ifndef CP_ALGORITHM
#define CP_ALGORITHM
#include "cpalgorithm.h"
#endif
/*
 * KM-config: detects multiple core-periphery pairs in a network by
 * maximizing a configuration-model quality function (Kojaku & Masuda,
 * arXiv:1710.07076). c[i] is node i's pair label; x[i] is 1 for core
 * membership and 0 for periphery.
 */
class KM_config: public CPAlgorithm{
public:
	// Constructors: default uses 10 runs; the int overload sets the count.
	KM_config();
	KM_config(int num_runs);

	// Interface required by CPAlgorithm.
	void detect(const Graph& G);
	void calc_Q(
	    const Graph& G,
	    const vector<int>& c,
	    const vector<double>& x,
	    double& Q,
	    vector<double>& q);

protected:
private:
	int _num_runs;                              // restarts of the optimizer
	uniform_real_distribution<double> _udist;   // tie-breaking in label proposals

	// Best-of-num_of_runs label-switching optimization.
	void _km_config_label_switching(
	    const Graph& G,
	    const int num_of_runs,
	    vector<int>& c,
	    vector<double>& x,
	    double& Q,
	    vector<double>& q,
	    mt19937_64& mtrnd
	    );

	// Quality gain of moving a node into a pair as core (x=1) or periphery (x=0).
	double _calc_dQ_conf(double d_i_c,
	    double d_i_p,
	    double d_i,
	    double D_c,
	    double D_p,
	    double selfloop,
	    double x,
	    const double M);

	// Best (label, core/periphery) move for one node and its quality gain.
	void _propose_new_label(
	    const Graph& G,
	    const vector<int>& c,
	    const vector<double>& x,
	    const vector<double>& sum_of_deg_core,
	    const vector<double>& sum_of_deg_peri,
	    const double M,
	    const int node_id,
	    int& cprime,
	    double& xprime,
	    double& dQ,
	    mt19937_64& mtrnd
	    );

	// One full label-switching pass until no improving move exists.
	void _km_config_label_switching_core(
	    const Graph& G,
	    vector<int>& c,
	    vector<double>& x,
	    mt19937_64& mtrnd
	    );

	// Louvain-style variant (used by detect()).
	void _km_config_louvain(
	    const Graph& G,
	    const int num_of_runs,
	    vector<int>& c,
	    vector<double>& x,
	    double& Q,
	    vector<double>& q,
	    mt19937_64& mtrnd
	    );
	/*
	void _km_config_louvain_core(
	    const Graph& G,
	    vector<int>& c,
	    vector<double>& x,
	    mt19937_64& mtrnd
	    );
	*/
	// Collapse each (pair, core/periphery) group into one node of newG.
	void _coarsing(
	    const Graph& G,
	    const vector<int>& c,
	    const vector<double>& x,
	    Graph& newG,
	    vector<int>& toLayerId
	    );

	// Compact pair labels to the contiguous range 0..K-1.
	void _relabeling(vector<int>& c);
};
/*-----------------------------
Constructor
-----------------------------*/
/*
 * Construct with a custom number of optimization runs.
 *
 * Bug fix: the previous body executed `KM_config();` as a statement, which
 * constructs and immediately discards a temporary object instead of
 * initializing *this* — leaving _udist and _mtrnd in their default,
 * unseeded state. Use a C++11 delegating constructor so the default
 * constructor's initialization really applies, then override the run count.
 */
KM_config::KM_config(int num_runs): KM_config(){
	_num_runs = num_runs;
};
/*
 * Default constructor: uniform distribution on [0,1) for tie breaking,
 * 10 optimization runs, and a freshly seeded Mersenne Twister.
 */
KM_config::KM_config():CPAlgorithm(){
	uniform_real_distribution<double> tmp(0.0,1.0);
	_udist = tmp;
	_num_runs = 10;
	_mtrnd = _init_random_number_generator();
};
/*-----------------------------
Functions inherited from the super class (CPAlgorithm)
-----------------------------*/
/*
 * Run core-periphery detection on G, storing labels/quality in the
 * inherited _c, _x, _Q, _q members. Uses the Louvain-style variant; the
 * plain label-switching call is kept commented out for reference.
 */
void KM_config::detect(const Graph& G){
	//_km_config_label_switching(G, _num_runs, _c, _x, _Q, _q, _mtrnd);
	_km_config_louvain(G, _num_runs, _c, _x, _Q, _q, _mtrnd);
}
/*
 * Quality function of KM-config: Q = sum_k q[k], where q[k] is the weight
 * of edges inside pair k that touch at least one core node (the factor
 * x_i + x_j - x_i*x_j is 0 only for periphery-periphery edges), minus its
 * expectation under the configuration model, normalized by 2M.
 * Assumes labels in c are contiguous in 0..K-1.
 */
void KM_config::calc_Q(
    const Graph& G,
    const vector<int>& c,
    const vector<double>& x,
    double& Q,
    vector<double>& q)
{
	int N = G.get_num_nodes();
	int K = *max_element(c.begin(), c.end()) + 1;
	q.assign(K, 0.0);
	vector<double> Dc(K, 0.0);   // summed degree of core nodes per pair
	vector<double> Dp(K, 0.0);   // summed degree of peripheral nodes per pair
	double double_M = 0.0;       // accumulates 2M (total degree)
	for (int i = 0; i < N; i++) {
		int sz = G.degree(i);
		double di = 0;
		for (int j = 0; j < sz; j++) {
			Neighbour nn = G.get_kth_neighbour(i, j);
			int nei = nn.get_node();
			double wj = nn.get_w();
			// !!(...) is 1 for a same-pair edge; each undirected edge is
			// visited from both endpoints
			q[c[i]] += wj * !!(c[i] == c[nei]) * (x[i] + x[nei] - x[i] * x[nei]);
			di+=wj;
		}
		Dc[c[i]] += x[i] * di;
		Dp[c[i]] += (1-x[i]) * di;
		double_M += di;
	}
	Q = 0;
	for (int k = 0; k < K; k++) {
		// subtract the configuration-model null expectation, normalize by 2M
		q[k] = (q[k] - (Dc[k] * Dc[k] + 2 * Dc[k] * Dp[k]) / double_M) / double_M;
		Q += q[k];
	}
}
/*-----------------------------
Private functions (internal use only)
-----------------------------*/
void KM_config::_km_config_label_switching(
const Graph& G,
const int num_of_runs,
vector<int>& c,
vector<double>& x,
double& Q,
vector<double>& q,
mt19937_64& mtrnd
)
{
int N = G.get_num_nodes();
c.clear();
x.clear();
c.assign(N, 0);
x.assign(N, 1.0);
Q = -1;
for (int i = 0; i < num_of_runs; i++) {
vector<int> ci;
vector<double> xi;
vector<double> qi;
double Qi = 0.0;
_km_config_label_switching_core(G, ci, xi, _mtrnd);
calc_Q(G, ci, xi, Qi, qi);
if (Qi > Q) {
c = ci;
x = xi;
q.clear();
q = qi;
Q = Qi;
}
}
}
/*
void KM_config::_km_config_label_switching(
const Graph& G,
const int num_of_runs,
vector<int>& c,
vector<double>& x,
double& Q,
vector<double>& q,
mt19937_64& mtrnd
)
{
int N = G.get_num_nodes();
c.clear();
x.clear();
c.assign(N, 0);
x.assign(N, true);
// Generate \hat q^{(s)} and \hat n^{(s)} (1 \leq s \leq S)
// create random number generator per each thread
int numthread = 1;
#ifdef _OPENMP
# pragma omp parallel
{
numthread = omp_get_num_threads();
}
#endif
cout<<numthread<<endl;
vector<mt19937_64> mtrnd_list(numthread);
for(int i = 0; i < numthread; i++){
mt19937_64 mtrnd = _init_random_number_generator();
mtrnd_list[i] = mtrnd;
}
Q = -1;
#ifdef _OPENMP
#pragma omp parallel for shared(c, x, Q, q, N, G, mtrnd_list)
#endif
for (int i = 0; i < num_of_runs; i++) {
vector<int> ci;
vector<double> xi;
vector<double> qi;
double Qi = 0.0;
int tid = 0;
#ifdef _OPENMP
tid = omp_get_thread_num();
#endif
mt19937_64 mtrnd = mtrnd_list[tid];
_km_config_label_switching_core(G, ci, xi, mtrnd);
calc_Q(G, ci, xi, Qi, qi);
#pragma omp critical
{
if (Qi > Q) {
c = ci;
x = xi;
q.clear();
q = qi;
Q = Qi;
}
}
}
}
*/
/*
 * Quality gain when a node of total degree d_i (d_i_c of it to the
 * candidate pair's core, d_i_p to its periphery) joins a pair whose core
 * and periphery degree sums are D_c and D_p, entering as core (x = 1) or
 * periphery (x = 0). selfloop is the node's self-loop weight and M the
 * total edge weight; the D/(2M) terms are the configuration-model
 * expectation being subtracted.
 */
double KM_config::_calc_dQ_conf(double d_i_c,
    double d_i_p,
    double d_i,
    double D_c,
    double D_p,
    double selfloop,
    double x,
    const double M)
{
	return 2 * (d_i_c + d_i_p * (x) - d_i * (D_c + D_p * x) / (2.0 * M)) + x * (selfloop - d_i * d_i / (2.0 * M));
}
/*
 * For one node, scan its neighbours' pairs and propose the (cprime, xprime)
 * move with the largest quality gain dQ relative to the node's current
 * assignment. dQ <= 0 means no improving move was found. Ties between core
 * and periphery membership are broken uniformly at random.
 * NOTE(review): edges_to_core/edges_to_peri are N-sized vectors allocated
 * on every call — O(N) work per node even for sparse graphs; a candidate
 * for reuse if this shows up in profiles.
 */
void KM_config::_propose_new_label(
    const Graph& G,
    const vector<int>& c,
    const vector<double>& x,
    const vector<double>& sum_of_deg_core,
    const vector<double>& sum_of_deg_peri,
    const double M,
    const int node_id,
    int& cprime,
    double& xprime,
    double& dQ,
    mt19937_64& mtrnd)
{
	int N = G.get_num_nodes();
	int neighbourNum = G.degree(node_id);
	double deg = G.wdegree(node_id);

	// per-pair edge weight from this node into cores and peripheries
	vector<double> edges_to_core(N, 0.0);
	vector<double> edges_to_peri(N, 0.0);

	double selfloop = 0;
	for (int j = 0; j < neighbourNum; j++) {
		Neighbour nn = G.get_kth_neighbour(node_id, j);
		int nei = nn.get_node();
		double wj = nn.get_w();

		if(node_id == nei){
			selfloop+= wj;
			continue;
		}
		edges_to_core[c[nei]] += wj * x[nei];
		edges_to_peri[c[nei]] += wj * (1-x[nei]);
	}

	// pair degree sums with the node's own contribution removed
	double D_core = sum_of_deg_core[c[node_id]] - deg * x[node_id];
	double D_peri = sum_of_deg_peri[c[node_id]] - deg * (1-x[node_id]);

	// gain of the node's current assignment; candidate moves are compared
	// against this baseline
	double dQold = _calc_dQ_conf(edges_to_core[c[node_id]], edges_to_peri[c[node_id]], deg,
	    D_core, D_peri, selfloop, x[node_id], M);

	dQ = 0;
	for (int j = 0; j < neighbourNum; j++) {
		Neighbour nn = G.get_kth_neighbour(node_id, j);
		int nei = nn.get_node();
		//double wj = nn.get_w();
		int cid = c[nei];

		// remove the node's contribution only when it already sits in cid
		D_core = sum_of_deg_core[cid] - deg * x[node_id] * (double)!!(c[node_id] == cid);
		D_peri = sum_of_deg_peri[cid] - deg * (1-x[node_id]) * (double)!!( c[node_id] == cid );

		// try joining pair cid as core and as periphery
		double Q_i_core = _calc_dQ_conf(edges_to_core[cid], edges_to_peri[cid],
		    deg, D_core, D_peri, selfloop, 1, M);
		double Q_i_peri = _calc_dQ_conf(edges_to_core[cid], edges_to_peri[cid],
		    deg, D_core, D_peri, selfloop, 0, M);

		Q_i_core -= dQold;
		Q_i_peri -= dQold;

		if (MAX(Q_i_core, Q_i_peri) < dQ)
			continue;

		if (Q_i_peri < Q_i_core) {
			xprime = 1;
			cprime = cid;
			dQ = Q_i_core;
		}
		else if (Q_i_peri > Q_i_core) {
			xprime = 0;
			cprime = cid;
			dQ = Q_i_peri;
		}
		else {
			// exact tie: pick core or periphery uniformly at random
			cprime = cid;
			if(_udist(mtrnd) < 0.5){
				xprime =1;
			}else{
				xprime =0;
			}
			dQ = Q_i_core;
		}
	}
}
// One run of the label-switching heuristic: starting from the singleton
// partition (every node its own community, all nodes core), repeatedly scan
// the nodes in random order and apply the best single-node relabeling
// proposed by _propose_new_label, until a full sweep produces no improvement.
// On return, c[i] holds the (relabeled, consecutive) community id of node i
// and x[i] its role (1 = core, 0 = periphery).
void KM_config::_km_config_label_switching_core(
    const Graph& G,
    vector<int>& c,
    vector<double>& x,
    mt19937_64& mtrnd
    )
{
    /* Variable declarations */
    int N = G.get_num_nodes();
    vector<double> sum_of_deg_core(N);  // per-community total degree of core nodes
    vector<double> sum_of_deg_peri(N);  // per-community total degree of periphery nodes
    vector<int> order(N);               // node visit order, reshuffled each sweep
    vector<double> degs(N);             // cached weighted degrees
    double M = 0;
    bool isupdated = false;
    fill(sum_of_deg_core.begin(), sum_of_deg_core.end(), 0.0);
    fill(sum_of_deg_peri.begin(), sum_of_deg_peri.end(), 0.0);
    c.clear();
    x.clear();
    c.assign(N, 0);
    x.assign(N, 1);
    // Singleton initialisation: node i forms community i as a core node.
    for (int i = 0; i < N; i++) {
        order[i] = i;
        c[i] = i;
        double deg = G.wdegree(i);
        degs[i] = deg;
        sum_of_deg_core[i] += x[i] * deg;
        M += deg;
    };
    M = M / 2;  // each edge was counted from both endpoints
    /* Label switching algorithm */
    do {
        isupdated = false;
        shuffle(order.begin(), order.end(), mtrnd);
        for (int scan_count = 0; scan_count < N; scan_count++) {
            int i = order[scan_count];
            int cprime = c[i]; // c'
            double xprime = x[i]; // x'
            double dQ = 0;
            _propose_new_label(G, c, x, sum_of_deg_core, sum_of_deg_peri,
                M, i, cprime, xprime, dQ, mtrnd);
            if (dQ <= 0)
                continue;
            // Bitwise & on the two bool comparisons — equivalent to && here
            // since both operands are 0/1.
            if ( (c[i] == cprime) & (x[i] == xprime) )
                continue;
            // Move node i: update the running degree totals incrementally.
            double deg = degs[i];
            sum_of_deg_core[c[i]] -= deg * x[i];
            sum_of_deg_peri[c[i]] -= deg * (1-x[i]);
            sum_of_deg_core[cprime] += deg * xprime;
            sum_of_deg_peri[cprime] += deg * (1-xprime);
            c[i] = cprime;
            x[i] = xprime;
            isupdated = true;
        }
    } while (isupdated == true);
    /* Remove empty core-periphery pairs */
    // Compact community ids to 0..K-1 in order of first appearance
    // (same logic as _relabeling).
    std::vector<int> labs;
    for (int i = 0; i < N; i++) {
        int cid = -1;
        int labsize = (int) labs.size();
        for (int j = 0; j < labsize; j++) {
            if (labs[j] == c[i]) {
                cid = j;
                break;
            }
        }
        if (cid < 0) {
            labs.push_back(c[i]);
            cid = (int)labs.size() - 1;
        }
        c[i] = cid;
    }
}
/* Louvain algorithm */
// Louvain-style multi-level detection: run label switching on the current
// (coarse) network, project the labels back onto the original nodes, keep the
// best partition seen so far, then coarsen the network by merging every
// (community, role) group into a single node. Stops when coarsening no longer
// shrinks the network.
//
// Outputs: c/x — best labels found for the original nodes, Q — its quality,
// q — per-pair quality terms (as produced by calc_Q).
void KM_config::_km_config_louvain(
    const Graph& G,
    const int num_of_runs,
    vector<int>& c,
    vector<double>& x,
    double& Q,
    vector<double>& q,
    mt19937_64& mtrnd
){
    // Intiialise variables
    int N = G.get_num_nodes();
    c.clear();
    x.clear();
    c.assign(N, 0);
    x.assign(N, 1);
    for (int i = 0; i < N; i++) c[i] = i;
    vector<int>ct = c; // label of each node at tth iteration
    vector<double>xt = x; // label of each node at tth iteration.
    Graph cnet_G; // coarse network
    vector<int> toLayerId; //toLayerId[i] maps 2*c[i] + x[i] to the id of node in the coarse network
    _coarsing(G, ct, xt, cnet_G, toLayerId); // Initialise toLayerId
    Q = 0; // quality of the current partition
    int cnet_N;
    do{
        cnet_N = cnet_G.get_num_nodes();
        // Core-periphery detection
        vector<int> cnet_c; // label of node in the coarse network, Mt
        vector<double> cnet_x; // label of node in the coarse network, Mt
        double Qt = 0; vector<double> qt;
        _km_config_label_switching(cnet_G, num_of_runs, cnet_c, cnet_x, Qt, qt, mtrnd);
        //_km_config_label_switching_core(cnet_G, cnet_c, cnet_x, mtrnd);
        // Update the label of node in the original network, ct and xt.
        // Each original node inherits the label of the coarse node that its
        // current (community, role) group was merged into.
        for(int i = 0; i< N; i++){
            int cnet_id = toLayerId[2 * ct[i] + (int)xt[i]];
            ct[i] = cnet_c[ cnet_id ];
            xt[i] = cnet_x[ cnet_id ];
        }
        // Compute the quality
        //calc_Q(G, ct, xt, Qt, qt);
        calc_Q(cnet_G, cnet_c, cnet_x, Qt, qt);
        if(Qt>=Q){ // if the quality is the highest among those detected so far
            c = ct;
            x = xt;
            Q = Qt;
            q = qt;
        }
        // Coarsing
        Graph new_cnet_G;
        _coarsing(cnet_G, cnet_c, cnet_x, new_cnet_G, toLayerId);
        cnet_G = new_cnet_G;
        //cout<<"---"<<cnet_G.get_num_nodes()<<" "<<cnet_G.get_num_edges()<<" "<<G.get_num_edges()<<"---"<<endl;
        // Fixed point: coarsening did not reduce the node count, so further
        // iterations cannot change anything.
        int sz = cnet_G.get_num_nodes();
        if(sz == cnet_N) break;
    }while( true );
    _relabeling(c);
}
// Coarsen the network: merge every (community, role) group — keyed by the
// composite id 2*c[i] + x[i] — into one node of `newG`, summing edge weights
// between groups (including within-group edges, which become self-loops).
// `toLayerId` maps a composite id to the corresponding coarse-node id.
void KM_config::_coarsing(
    const Graph& G,
    const vector<int>& c,
    const vector<double>& x,
    Graph& newG,
    vector<int>& toLayerId
){
    int N = (int) c.size();
    vector<int> ids(N,0);
    int maxid = 0;
    for(int i = 0;i<N;i++){
        // Composite key: community id and core(1)/periphery(0) role.
        ids[i] = 2 * c[i] + (int)x[i];
        maxid = MAX(maxid, ids[i]);
    }
    // Compact composite keys to consecutive coarse-node ids 0..K-1.
    _relabeling(ids);
    toLayerId.clear();
    toLayerId.assign(maxid+1,0);
    for(int i = 0;i<N;i++){
        toLayerId[2 * c[i] + (int)x[i]] = ids[i];
    }
    int K = *max_element(ids.begin(), ids.end()) + 1;
    newG = Graph(K);
    // Re-emit every edge between the coarse endpoints; addEdge presumably
    // accumulates weight for repeated pairs (confirm in Graph) and compress()
    // finalises the adjacency structure.
    for(int i = 0;i<N;i++){
        int mi = 2 * c[i] + (int)x[i];
        int sz = G.degree(i);
        for(int j = 0;j<sz;j++){
            Neighbour nb = G.get_kth_neighbour(i, j);
            int nei = nb.get_node();
            double w = nb.get_w();
            int mj = 2 * c[nei] + (int)x[nei];
            int sid = toLayerId[mi];
            int did = toLayerId[mj];
            newG.addEdge(sid, did, w);
        }
    }
    newG.compress();
}
// Remap the labels in `c` to consecutive ids 0, 1, 2, ... assigned in order
// of first appearance, so e.g. {7, 3, 7, 9} becomes {0, 1, 0, 2}.
void KM_config::_relabeling(
    vector<int>& c
){
    std::vector<int> seen; // seen[j] is the original label that received new id j
    const int n = (int) c.size();
    for (int v = 0; v < n; v++) {
        auto it = std::find(seen.begin(), seen.end(), c[v]);
        if (it == seen.end()) {
            // First occurrence of this label: give it the next free id.
            seen.push_back(c[v]);
            it = seen.end() - 1;
        }
        c[v] = (int)(it - seen.begin());
    }
}
|
task_in_joinbarrier.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>
// OMPT test: verifies task frame/ids reported around a task executed inside
// the implicit join barrier of a 2-thread parallel region. The master creates
// a task and waits; the worker picks the task up while sitting in the barrier.
// The `// CHECK` comments below are FileCheck directives matched against the
// runtime's callback output — they are part of the test and must not change.
int main()
{
  int condition=0;
  omp_set_nested(0);
  print_frame(0);
  #pragma omp parallel num_threads(2)
  {
    print_frame_from_outlined_fn(1);
    print_ids(0);
    print_ids(1);
    print_frame(0);
    // Only the master creates the task, so exactly one task is scheduled.
    #pragma omp master
    {
      print_ids(0);
      #pragma omp task shared(condition)
      {
        OMPT_SIGNAL(condition);
        print_frame(1);
        print_ids(0);
        print_ids(1);
        print_ids(2);
      }
      // Block the master until the task has started, forcing the worker
      // (not the master) to execute it in the join barrier.
      OMPT_WAIT(condition,1);
      print_ids(0);
    }
    print_ids(0);
  }
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
  // make sure initial data pointers are null
  // CHECK-NOT: 0: new_task_data initially not null
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=0x{{[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
  // nested parallel masters
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // <- ompt_event_task_create would be expected here
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=0x{{[0-f]+}}, new_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[TASK_FUNCTION:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // implicit barrier parallel
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
  // implicit barrier parallel
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(1)=[[TASK_EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_end: task_id=[[TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  return 0;
}
|
subtractImage_parallel_omp.c | #include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp> //
#include <iostream>
#include "x86intrin.h"
#include "immintrin.h"
#include "emmintrin.h"
#include "smmintrin.h"
#include "tmmintrin.h"
#include "cv.h"
#include "omp.h"
#include <math.h>
using namespace std;
// Benchmark: grayscale image subtraction, serial vs. OpenMP-parallel, for
// thread counts 2..48. Loads "lena.png" twice, converts to single-channel,
// times the serial subtraction, then times the parallel version until five
// runs beat the serial baseline (per thread count) and reports the speedup.
int main()
{
    IplImage *image1 = 0;
    IplImage *image2 = 0;
    IplImage *img1 = 0;
    IplImage *img2 = 0;
    IplImage *img_sub = 0;
    unsigned char* data1;
    unsigned char* data2;
    unsigned char* data_dst;
    // Ipp64u start, end, time1;
    double start1,start2,end2;
    double time1, time2;
    int cols,rows;
    const char* filename = (char*)"lena.png";
    image1 = cvLoadImage( filename, 1 );
    if (!image1)
    {
        printf( "can not read the image1 file\n");
        return 0;
    }
    else
    {
        cvShowImage( "Original image1", image1 );
        img1=cvCreateImage( cvSize(image1->width,image1->height), 8, 1 );
        if ( image1->nChannels!=1)
            cvCvtColor( image1, img1, CV_BGR2GRAY );
        else
            cvCopy( image1, img1);
        data1 = (unsigned char *)img1->imageData;
    }
    filename = (char*)"lena.png";
    image2 = cvLoadImage( filename, 1 );
    if (!image2)
    {
        printf( "can not read the image2 file\n");
        // Release what was already allocated before bailing out.
        cvReleaseImage( &img1 );
        cvReleaseImage( &image1 );
        return 0;
    }
    else
    {
        cvShowImage( "Original image2", image2 );
        img2=cvCreateImage( cvSize(image2->width,image2->height), 8, 1 );
        if ( image2->nChannels!=1)
            cvCvtColor( image2, img2, CV_BGR2GRAY );
        else
            cvCopy( image2, img2);
        data2 = (unsigned char *)img2->imageData;
    }
    img_sub = cvCreateImage( cvSize(image2->width,image2->height), 8, 1 );
    data_dst = (unsigned char*)img_sub->imageData;
    ////++++++++++++++++++++++++++SIMD PARALLELISM+++++++
    cols= img1->width;
    rows= img1->height;
    // Serial baseline.
    start1 = omp_get_wtime();
    for (int i=0; i < rows; i++)
        for (int j=0; j < cols; j++)
            data_dst[i*cols+j] = data1[i*cols+j]-data2[i*cols+j];
    time1 = omp_get_wtime() - start1;
    printf ("sub_img Serial = %.16g \n", time1);
    cvShowImage( "output Serial", img_sub );
    int p,p2;
    for (p = 2; p < 50; p+=2)
    {
        time2=0;
        // Keep timing until 5 parallel runs beat the serial baseline, but cap
        // the retries so the benchmark cannot hang when the parallel version
        // never wins (the original loop had no upper bound).
        int attempts = 0;
        for (p2 = 0; p2 < 5 && attempts < 1000; attempts++)
        {
            start2 = omp_get_wtime();
            // BUGFIX: the schedule clause was on a separate, invalid
            // "#pragma for" line, which compilers silently ignore; it must be
            // part of the "omp parallel for" directive to take effect.
            #pragma omp parallel for num_threads(p) shared(data_dst,data2,data1) schedule(dynamic,1)
            for (int i=0; i < rows; i++)
                for (int j=0; j < cols; j++)
                    data_dst[i*cols+j] = data1[i*cols+j]-data2[i*cols+j];
            end2 = omp_get_wtime();
            //end = ippGetCpuClocks();
            if((end2-start2)<time1 ){
                time2 +=end2-start2;
                p2++;
            }
        }
        if (p2 == 0)
            continue; // never beat the baseline; avoid dividing by zero
        time2=time2/p2;
        printf ("sub_img Parallel = %.16g ", time2);
        printf ("Speedup = %.16g \n", time1/time2);
    }
    cvShowImage( "output Parallel", img_sub );
    cvReleaseImage( &img_sub );
    cvWaitKey(0);
    // Release the remaining images (the original leaked all four).
    cvReleaseImage( &img1 );
    cvReleaseImage( &img2 );
    cvReleaseImage( &image1 );
    cvReleaseImage( &image2 );
    return 0;
}
special_ops.h | #pragma once
#include <ops/ops.h>
#include <loops/reduce.h>
#include <loops/scalar.h>
#include <loops/indexreduce.h>
#include <loops/broadcasting.h>
// Forward declarations of the operation-executor templates defined in the
// loops/ headers, so the classes below can reference them without pulling in
// their full definitions.
namespace functions {
namespace broadcast {
template <typename T>
class Broadcast;
}
namespace transform {
template <typename T>
class Transform;
}
namespace scalar {
}
namespace reduce {
template <typename T>
class ReduceFunction;
}
}
namespace simdOps {
// 2D pooling (max / avg / pnorm) over NCHW input, with stride, padding and
// dilation. poolingMode: 0 = max, 1 = avg (extraParam0 selects include/exclude
// padding in the divisor), 2 = pnorm (extraParam0 is the exponent).
// extraParams layout: [kH, kW, sH, sW, pH, pW, dH, dW, ?, poolingMode, extraParam0].
template<typename T>
class Pooling2D {
public:
    static const bool requiresSpecial = true;
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    // Output spatial extent for one dimension given kernel k, stride s, pad p.
    static int outSize(int size, int k, int s, int p, bool coverAll) {
        if (coverAll)
            return (size + p * 2 - k + s - 1) / s + 1;
        else
            return (size + p * 2 - k) / s + 1;
    }

#ifdef __CUDACC__
    /**
    * Based on: https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu
    */
    static inline __device__ void execSpecialCuda(
        T *dx,
        Nd4jLong *xShapeBuffer,
        T *result,
        Nd4jLong *resultShapeBuffer,
        T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        __shared__ int kH;
        __shared__ int kW;
        __shared__ int sH;
        __shared__ int sW;
        __shared__ int pH;
        __shared__ int pW;
        __shared__ int dH;
        __shared__ int dW;
        __shared__ int poolingMode;
        __shared__ T extraParam0;

        __shared__ int batchSize;
        __shared__ int inChannels;
        __shared__ int outH;
        __shared__ int outW;
        __shared__ int inH;
        __shared__ int inW;

        //__shared__ int *strideIn;
        //__shared__ int *strideOut;
        __shared__ int strideB;
        __shared__ int strideC;
        __shared__ int strideY;
        __shared__ int strideX;

        __shared__ int strideOB;
        __shared__ int strideOC;
        __shared__ int strideOY;
        __shared__ int strideOX;

        __shared__ int length;
        __shared__ int kHEff;
        __shared__ int kWEff;
        __shared__ bool fOrder;

        // One thread per block decodes the parameters into shared memory.
        if (threadIdx.x == 0) {
            kH = (int)extraParams[0];
            kW = (int)extraParams[1];
            sH = (int)extraParams[2];
            sW = (int)extraParams[3];
            pH = (int)extraParams[4];
            pW = (int)extraParams[5];
            dH = (int)extraParams[6]; //Dilation, height dimension
            dW = (int)extraParams[7]; //Dilation, width dimension
            poolingMode = (int)extraParams[9];
            extraParam0 = extraParams[10];

            batchSize = shape::sizeAt(xShapeBuffer, 0);
            inChannels = shape::sizeAt(xShapeBuffer, 1);
            outH = shape::sizeAt(resultShapeBuffer, 2);
            outW = shape::sizeAt(resultShapeBuffer, 3);
            inH = shape::sizeAt(xShapeBuffer, 2);
            inW = shape::sizeAt(xShapeBuffer, 3);

            strideB = shape::stride(xShapeBuffer)[0];
            strideC = shape::stride(xShapeBuffer)[1];
            strideY = shape::stride(xShapeBuffer)[2];
            strideX = shape::stride(xShapeBuffer)[3];

            strideOB = shape::stride(resultShapeBuffer)[0];
            strideOC = shape::stride(resultShapeBuffer)[1];
            strideOY = shape::stride(resultShapeBuffer)[2];
            strideOX = shape::stride(resultShapeBuffer)[3];

            length = shape::length(resultShapeBuffer);

            //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon
            kHEff = kH + (kH-1)*(dH-1);
            kWEff = kW + (kW-1)*(dW-1);

            fOrder = shape::order(resultShapeBuffer) == 'f';
            /*
            if (blockIdx.x == 0) {
                printf("kH: %i; kW: %i; sH: %i; sW: %i; pH: %i; pW: %i; dH: %i; dW: %i; poolingMode: %i; extraParam0: %f;\n", kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, (float) extraParam0);
                printf("batchSize: %i; inChannels: %i; outH: %i; outW: %i; inH: %i; inW: %i; strideB: %i; strideC: %i; strideY: %i; strideX: %i;\n", batchSize, inChannels, outH, outW, inH, inW, strideB, strideC, strideY, strideX);
            }
            */
        }
        __syncthreads();

        // BUGFIX: the global thread id is blockIdx.x * blockDim.x + threadIdx.x;
        // the original used gridDim.x here, which makes threads collide on /
        // skip output elements whenever blockDim != gridDim (cf. the correct
        // indexing in Im2col::execSpecialCuda below).
        int tid = blockIdx.x * blockDim.x + threadIdx.x;

        for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
            // Decompose the flat output index into (n, c, ph, pw), c-order.
            const int pw = index % outW;
            const int ph = (index / outW) % outH;
            const int c = (index / outW / outH) % inChannels;
            const int n = index / outW / outH / inChannels;

            // Input window for this output pixel, clipped to the image while
            // staying on the dilation grid.
            int hstart = sH * ph - pH;
            int wstart = sW * pw - pW;
            int hend = hstart + kHEff;
            int wend = wstart + kWEff;

            if(hstart < 0){
                int f = (int)nd4j::math::nd4j_ceil<T>((T) -hstart / (T)dH);
                hstart += f * dH;
            }
            if(wstart < 0){
                int f = (int)nd4j::math::nd4j_ceil<T>((T) -wstart / (T) dW);
                wstart += f * dW;
            }
            if(hend > inH){
                int f = (int)nd4j::math::nd4j_ceil<T>((T) (hend-inH) / (T) dH);
                hend -= f * dH;
            }
            if(wend > inW){
                int f = (int)nd4j::math::nd4j_ceil<T>((T) (wend-inW) / (T) dW);
                wend -= f * dW;
            }

            int pool_size = (int)(nd4j::math::nd4j_ceil<T>((T) (hend-hstart) / (T) dH) * (int) nd4j::math::nd4j_ceil<T>((T) (wend-wstart) / (T) dW)); //Accounts for dilation

            // Max pooling starts from -inf, sum-based modes from 0.
            T sum = poolingMode == 0 ? (T) -MAX_FLOAT : (T) 0;

            T *input_slice = dx + (n * strideB + c * strideC);
            if (poolingMode == 0) {
                for (int h = hstart; h < hend; h += dH) {
                    for (int w = wstart; w < wend; w += dW) {
                        T v = input_slice[h * strideY + w * strideX];
                        if (v > sum)
                            sum = v;
                    }
                }
            } else if (poolingMode == 1) {
                for (int h = hstart; h < hend; h += dH) {
                    for (int w = wstart; w < wend; w += dW) {
                        sum += input_slice[h * strideY + w * strideX];
                    }
                }
            } else if (poolingMode == 2) {
                for (int h = hstart; h < hend; h += dH) {
                    for (int w = wstart; w < wend; w += dW) {
                        sum += nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_abs<T>(input_slice[h * strideY + w * strideX]), extraParam0);
                    }
                }
            }

            // Finalize: max is the running value itself; avg divides; pnorm takes
            // the 1/p root. (res was previously left uninitialized outside the
            // three known modes — now it defaults to sum.)
            T res = sum;
            if (poolingMode == 1) {
                int divide_factor = pool_size; //Case 0: exclude padding
                if ((int) extraParam0 == 1) //Case 1: include padding
                    divide_factor = kH * kW;
                res = sum / divide_factor;
            } else if (poolingMode == 2) {
                res = nd4j::math::nd4j_pow<T>(sum, (T) 1.0f / extraParam0);
            }

            if (!fOrder) {
                result[index] = res;
            } else {
                result[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = res;
            }
            /*
            if (index >= 0 && index < 400000) {
                printf("index: %i; hstart: %i; hend: %i; wstart: %i; wend: %i; ph: %i; pw: %i;\n", index, hstart, hend, wstart, wend, ph, pw);
            }
            */
        }
    }
#endif

    // CPU implementation: parallelised over (channel, batch) pairs; each task
    // walks its own output plane, so no two threads write the same element.
    static void execSpecial(
        T *dx,
        Nd4jLong *xShapeBuffer,
        T *result,
        Nd4jLong *resultShapeBuffer,
        T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        int kH = (int)extraParams[0];
        int kW = (int)extraParams[1];
        int sH = (int)extraParams[2];
        int sW = (int)extraParams[3];
        int pH = (int)extraParams[4];
        int pW = (int)extraParams[5];
        int dH = (int)extraParams[6]; //Dilation, height dimension
        int dW = (int)extraParams[7]; //Dilation, width dimension
        int poolingMode = (int)extraParams[9];
        T extraParam0 = extraParams[10];

        const int kHEff = kH + (kH-1)*(dH-1);
        const int kWEff = kW + (kW-1)*(dW-1);

        const int batchSize = (int) shape::sizeAt(xShapeBuffer, 0);
        const int inChannels = (int) shape::sizeAt(xShapeBuffer, 1);
        const int outH = (int) shape::sizeAt(resultShapeBuffer, 2);
        const int outW = (int) shape::sizeAt(resultShapeBuffer, 3);
        const int inH = (int) shape::sizeAt(xShapeBuffer, 2);
        const int inW = (int) shape::sizeAt(xShapeBuffer, 3);

        auto strideIn = shape::stride(xShapeBuffer);
        auto strideOut = shape::stride(resultShapeBuffer);
        const bool fOrder = shape::order(resultShapeBuffer) == 'f';

        // (Removed an unused `int indices[6]` that was pointlessly listed in a
        // shared() clause, plus unused zLength/zRank locals.)
#pragma omp parallel for collapse(2) schedule(guided)
        for(int k = 0; k < inChannels; k++)
        {
            for(int p = 0; p < batchSize; p++)
            {
                int xx, yy;
                /* For all output pixels... */
                const int _b = p * strideOut[0];
                const int _k = k * strideOut[1];
                T *ptr_output = result + _b + _k;
                T *ptr_input = dx + p * strideIn[0] + k * strideIn[1];
                for(yy = 0; yy < outH; yy++)
                {
                    for(xx = 0; xx < outW; xx++)
                    {
                        /* Compute the mean of the input image... */
                        // Input window, clipped to the image on the dilation grid.
                        int hstart = yy * sH - pH;
                        int wstart = xx * sW - pW;
                        int hend = hstart + kHEff;
                        int wend = wstart + kWEff;

                        if(hstart < 0){
                            int n = (int)nd4j::math::nd4j_ceil<T>((T) -hstart / ((T)dH));
                            hstart += n * dH;
                        }
                        if(wstart < 0){
                            int n = (int)nd4j::math::nd4j_ceil<T>((T) -wstart / ((T)dW));
                            wstart += n * dW;
                        }
                        if(hend > inH){
                            int n = (int)nd4j::math::nd4j_ceil<T>((T)(hend-inH)/((T)dH));
                            hend -= n * dH;
                        }
                        if(wend > inW){
                            int n = (int)nd4j::math::nd4j_ceil<T>((T)(wend-inW)/((T)dW));
                            wend -= n * dW;
                        }

                        int pool_size = (int)(nd4j::math::nd4j_ceil<T>((T) (hend-hstart)/((T)dH))
                            * (int)nd4j::math::nd4j_ceil<T>((T)(wend-wstart)/((T)dW))); //Accounts for dilation

                        T sum = poolingMode == 0 ? (T) -MAX_FLOAT : (T) 0;

                        // we need this only for avg pooling
                        int divide_factor = 0;
                        if (poolingMode == 1) {
                            if ((int) extraParam0 == 0) //Exclude padding
                                divide_factor = pool_size;
                            else if ((int) extraParam0 == 1) //Include padding
                                divide_factor = kH * kW;
                        }

                        long kx, ky;
                        if (poolingMode == 0) {
#pragma omp simd reduction(maxT:sum) collapse(2)
                            for (ky = hstart; ky < hend; ky += dH) {
                                for (kx = wstart; kx < wend; kx += dW)
                                    if (ptr_input[ky * strideIn[2] + kx * strideIn[3]] > sum)
                                        sum = ptr_input[ky * strideIn[2] + kx * strideIn[3]];
                            }
                        } else if (poolingMode == 1) {
#pragma omp simd reduction(sumT:sum) collapse(2)
                            for (ky = hstart; ky < hend; ky += dH) {
                                for (kx = wstart; kx < wend; kx += dW)
                                    sum += ptr_input[ky * strideIn[2] + kx * strideIn[3]];
                            }
                        } else if (poolingMode == 2) {
#pragma omp simd reduction(sumT:sum) collapse (2)
                            for (ky = hstart; ky < hend; ky += dH) {
                                for (kx = wstart; kx < wend; kx += dW)
                                    sum += nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_abs<T>(ptr_input[ky * strideIn[2] + kx * strideIn[3]]), extraParam0);
                            }
                        }

                        /* Update output */
                        T res = sum;
                        if (poolingMode == 1) {
                            res /= divide_factor;
                        } else if (poolingMode == 2)
                            res = nd4j::math::nd4j_pow<T>(res, (T) 1.0f / extraParam0);

                        // c-order output is written sequentially; f-order via strides.
                        if (!fOrder) {
                            *ptr_output++ = res;
                        } else {
                            result[_b + _k + yy * strideOut[2] + xx * strideOut[3]] = res;
                        }
                    }
                }
            }
        }
    }

    // Element-wise op is identity; all the work happens in execSpecial*.
    op_def static T op(T d1, T *params) {
        return d1;
    }

    /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc
    * normally negative indices are bad, OK here because of other checks on input indices
    * Uses unrolled loop specifically for length 4
    */
    static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) {
        int offset = baseOffset;
        if (shape[0] != 1) offset += indices[0] * stride[0];
        if (shape[1] != 1) offset += indices[1] * stride[1];
        if (shape[2] != 1) offset += indices[2] * stride[2];
        if (shape[3] != 1) offset += indices[3] * stride[3];
        return offset;
    }

    /**
    * A version of Shape.getOffset without checking on input for negative indices etc
    * normally negative indices are bad, OK here because of other checks on input indices
    * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here)
    */
    static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) {
        int offset = baseOffset;
        if (shape[0] != 1) offset += indices[0] * stride[0];
        if (shape[1] != 1) offset += indices[1] * stride[1];
        if (shape[4] != 1) offset += indices[4] * stride[4];
        if (shape[5] != 1) offset += indices[5] * stride[5];
        return offset;
    }
};
// True iff 0 <= a < b, using a single unsigned comparison: a negative `a`
// wraps around to a huge unsigned value and therefore fails `< b`.
FORCEINLINE bool is_a_ge_zero_and_a_lt_b(int a, int b) {
    const unsigned ua = static_cast<unsigned>(a);
    const unsigned ub = static_cast<unsigned>(b);
    return ua < ub;
}
// im2col: expands an NCHW image into a 6D [bS, iC, kH, kW, oH, oW] buffer so
// a convolution can be expressed as a matrix multiply. Supports stride,
// padding and dilation; out-of-image positions are filled with zeroPadVal.
template<typename T>
class
Im2col {
public:
    static const bool requiresSpecial = true;

    // Output spatial extent for one dimension given kernel k, stride s, pad p.
    static _CUDA_HD int outSize(int size, int k, int s, int p, bool coverAll) {
        if (coverAll)
            return (size + p * 2 - k + s - 1) / s + 1;
        else
            return (size + p * 2 - k) / s + 1;
    }

#ifdef __CUDACC__
    /**
    * Based on: https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu
    */
    static inline __device__ void execSpecialCuda(
        T *dx,
        Nd4jLong *xShapeBuffer,
        T *result,
        Nd4jLong *resultShapeBuffer,
        T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/
        int kernelHeight = (int)extraParams[0];
        int kernelWidth = (int)extraParams[1];
        int strideY = (int)extraParams[2];
        int strideX = (int)extraParams[3];
        int padHeight = (int)extraParams[4];
        int padWidth = (int)extraParams[5];
        int dY = (int)extraParams[6]; //Dilation, height/y dimension
        int dX = (int)extraParams[7]; //Dilation, width/x dimension
        int kSize = kernelWidth * kernelHeight;
        T zeroPadVal = (T)extraParams[9]; //Value to use when value is padding. Usually 0 but not always

        auto outShape = shape::shapeOf(resultShapeBuffer);
        auto resultOrder = shape::order(resultShapeBuffer);
        auto outStride = shape::stride(resultShapeBuffer);

        auto inShape = shape::shapeOf(xShapeBuffer);
        auto inStride = shape::stride(xShapeBuffer);

        int samples = inShape[0];
        int depth = inShape[1];
        int height = inShape[2];
        int width = inShape[3];

        int strideex = inStride[0];
        int stridech = inStride[1];
        int strideh = inStride[2];
        int stridew = inStride[3];

        // (height + 2 * padHeight - kernelHeight) / strideX + 1; //
        // (width + 2 * padWidth - kernelWidth) / strideY + 1; //
        int height_col = outShape[4];
        int width_col = outShape[5];

        // One work item per (sample, channel, output row, output col).
        int n = samples * depth * height_col * width_col;
        /*
        if (threadIdx.x == 0)
            printf("Kernel h: [%i], w: [%i]; Col h: [%i], w: [%i]; Stride x: [%i], y: [%i]; Height: [%i], Width: [%i], Depth: [%i], N: [%i], Samples: [%i]\n",
            kernelHeight, kernelWidth, height_col, width_col, strideX, strideY, height, width, depth, n, samples);
        */

        int index = blockIdx.x * blockDim.x + threadIdx.x;
        for (; index < n; index += blockDim.x*gridDim.x) {
            // Decompose the flat index into output coordinates.
            int h_index = index / width_col;
            int h_col = h_index % height_col;
            int w_col = index % width_col;

            int c_im = h_index / height_col;
            int c_col = c_im * kSize;

            int depth_im = c_im % depth;
            int num_im = c_im / depth;
            int h_offset = h_col * strideY - padHeight;
            int w_offset = w_col * strideX - padWidth;

            T* data_col_ptr = result;

            int i_c = (c_col * height_col + h_col) * width_col + w_col;
            data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;

            T* data_im_ptr = dx;

            data_im_ptr += num_im * strideex + depth_im * stridech + h_offset * strideh + w_offset*stridew;

            for (int i = 0; i < kernelHeight; ++i) {
                for (int j = 0; j < kernelWidth; ++j) {
                    int h_im = h_offset + i * dY;
                    int w_im = w_offset + j * dX;
                    // Convert the logical column index i_c into a strided buffer
                    // offset i_f (handles both c and f output orders).
                    int i_f = 0;
                    int i_c_temp = i_c;
                    for (int dim = 5; dim >= 0; dim--) {
                        i_f += (i_c_temp % outShape[dim]) * outStride[dim];
                        i_c_temp = i_c_temp / outShape[dim];
                    }
                    if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width){
                        result[i_f] = data_im_ptr[i * dY * strideh + j * dX * stridew];
                    } else result[i_f] = zeroPadVal;

                    //result[i_f] = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? data_im_ptr[i * strideh + j*stridew] : 0;
                    data_col_ptr += height_col * width_col;
                    i_c += height_col * width_col;
                }
            }
        }
    }
#endif

    // CPU implementation, parallelised over the batch dimension.
    // NOTE(review): here zeroPadVal is hard-coded to 0 while the CUDA path
    // reads it from extraParams[9] ("usually 0 but not always") — confirm
    // whether the CPU path is ever invoked with a non-zero pad value.
    static void execSpecial(
        T *dx,
        Nd4jLong *xShapeBuffer,
        T *result,
        Nd4jLong *resultShapeBuffer,
        T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/
        T zeroPadVal = (T) 0.0f;
        int kH = (int)extraParams[0];
        int kW = (int)extraParams[1];
        int sH = (int)extraParams[2];
        int sW = (int)extraParams[3];
        int pH = (int)extraParams[4];
        int pW = (int)extraParams[5];
        int dH = (int)extraParams[6]; //Dilation, height/y dimension
        int dW = (int)extraParams[7]; //Dilation, width/x dimension

        auto outShape = shape::shapeOf(resultShapeBuffer);
        auto outStride = shape::stride(resultShapeBuffer);

        auto inShape = shape::shapeOf(xShapeBuffer);
        auto inStride = shape::stride(xShapeBuffer);

        const int bS = inShape[0];
        const int iC = inShape[1];
        const int iH = inShape[2];
        const int iW = inShape[3];
        const int oH = outShape[4];
        const int oW = outShape[5];
        const int outStride0 = outStride[0];
        const int outStride1 = outStride[1];
        const int outStride2 = outStride[2];
        const int outStride3 = outStride[3];
        const int outStride4 = outStride[4];
        const int outStride5 = outStride[5];
        const int inStride0 = inStride[0];
        const int inStride1 = inStride[1];
        const int inStride2 = inStride[2];
        const int inStride3 = inStride[3];

        const T* in0End = dx + inStride1 * iC;
        const int kRowEnd = -pH + kH * dH;
        const int kColEnd = -pW + kW * dW;
        const int oHW = oH * oW;
        const int inRowEnd = oH * sH;
        const int inColEnd = oW * sW;

        int inRowStart, inColStart, inRow, inCol;
        T *in0, *in1;

        // Fast path: both buffers dense c-order, so the output can be written
        // with a single monotonically advancing pointer.
        if (shape::order(xShapeBuffer) == 'c' && shape::order(resultShapeBuffer) == 'c' && shape::strideDescendingCAscendingF(xShapeBuffer) && shape::strideDescendingCAscendingF(resultShapeBuffer)) {

#pragma omp parallel for schedule(static) proc_bind(close) private(in0, in1, inRowStart, inColStart, inRow, inCol)
            for (int b = 0; b < bS; b++) {
                in0 = dx + (b * inStride0);
                T *output = result + (b * outStride0);

                for (int channel = 0; channel < iC; ++channel, in0 += inStride1) {
                    for (int kRow = 0; kRow < kH; kRow++) {
                        inRowStart = -pH + kRow * dH;

                        for (int kCol = 0; kCol < kW; kCol++) {
                            inRow = inRowStart;
                            inColStart = -pW + kCol * dW;

                            for (int outRow = 0; outRow < oH; ++outRow, inRow += sH) {
                                // Whole row outside the image: emit padding.
                                if (!is_a_ge_zero_and_a_lt_b(inRow, iH))
                                    for (int outCol = 0; outCol < oW; ++outCol, ++output) {
                                        *output = zeroPadVal;
                                    }
                                else {
                                    inCol = inColStart;
                                    in1 = in0 + inRow * inStride2;

                                    for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, ++output)
                                        if (is_a_ge_zero_and_a_lt_b(inCol, iW))
                                            *output = *(in1 + inCol * inStride3);
                                        else
                                            *output = zeroPadVal;
                                }
                            }
                        }
                    }
                }
            }
        }
        else {
            // General path: walk the 6D output with explicit per-dimension
            // stride pointers out0..out4.
            T *out0, *out1, *out2, *out3, *out4;

#pragma omp parallel for schedule(static) proc_bind(close) private(in0, in1, out0, out1, out2, out3, out4, inRowStart, inColStart, inRow, inCol)
            for (int b = 0; b < bS; b++) {
                in0 = dx + (b * inStride0);
                out0 = result + b * outStride0;

                for (int channel = 0; channel < iC; ++channel, in0 += inStride1, out0+=outStride1) {
                    out1 = out0;

                    for (int kRow = 0; kRow < kH; kRow++, out1 += outStride2) {
                        out2 = out1;
                        inRowStart = -pH + kRow * dH;

                        for (int kCol = 0; kCol < kW; kCol++, out2 += outStride3) {
                            out3 = out2;
                            inRow = inRowStart;
                            inColStart = -pW + kCol * dW;

                            for (int outRow = 0; outRow < oH; ++outRow, inRow += sH, out3 += outStride4) {
                                out4 = out3;

                                if (!is_a_ge_zero_and_a_lt_b(inRow, iH))
                                    for (int outCol = 0; outCol < oW; ++outCol, out4 += outStride5) {
                                        *out4 = zeroPadVal;
                                    }
                                else {
                                    inCol = inColStart;
                                    in1 = in0 + inRow * inStride2;

                                    for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, out4 += outStride5) {
                                        if (is_a_ge_zero_and_a_lt_b(inCol, iW))
                                            *out4 = *(in1 + inCol * inStride3);
                                        else
                                            *out4 = zeroPadVal;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    // Element-wise op is identity; all the work happens in execSpecial*.
    op_def static T op(T d1, T *params) {
        return d1;
    }

    /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc
    * normally negative indices are bad, OK here because of other checks on input indices
    * Uses unrolled loop specifically for length 4
    */
    static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) {
        int offset = baseOffset;
        if (shape[0] != 1) offset += indices[0] * stride[0];
        if (shape[1] != 1) offset += indices[1] * stride[1];
        if (shape[2] != 1) offset += indices[2] * stride[2];
        if (shape[3] != 1) offset += indices[3] * stride[3];
        return offset;
    }

    /**
    * A version of Shape.getOffset without checking on input for negative indices etc
    * normally negative indices are bad, OK here because of other checks on input indices
    * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here)
    */
    static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) {
        int offset = baseOffset;
        if (shape[0] != 1) offset += indices[0] * stride[0];
        if (shape[1] != 1) offset += indices[1] * stride[1];
        if (shape[4] != 1) offset += indices[4] * stride[4];
        if (shape[5] != 1) offset += indices[5] * stride[5];
        return offset;
    }
};
template<typename T>
class Histogram {
public:
static const bool requiresSpecial = true;
#ifdef __CUDACC__
// Device-side histogram over the whole input buffer.
// extraParams layout: [0] = number of bins, [1] = min value, [2] = max value.
// Bins are accumulated in shared memory per block; for multi-block launches
// each block stages its bins in allocationPointer and the last block to
// finish (elected via a ticket counter in reductionPointer) merges them.
static inline __device__ void execSpecialCuda(
T *dx,
Nd4jLong *xShapeBuffer,
T *result,
Nd4jLong *resultShapeBuffer,
T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
int numBins = (int) extraParams[0];
T min_val = extraParams[1];
T max_val = extraParams[2];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ T *bins;
__shared__ int length;
__shared__ T *reductor;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
bins = (T *) shmem;
// this block's slice of the global scratch area used for cross-block merge
reductor = ((T *) allocationPointer) + (numBins * blockIdx.x);
length = shape::length(xShapeBuffer);
}
__syncthreads();
T binSize = (max_val - min_val) / (numBins);
// zero the shared-memory bins
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
bins[e] = (T) 0.0f;
}
__syncthreads();
// each thread strides over the input and bumps its bin atomically;
// out-of-range values are clamped into the first/last bin
for (int e = tid; e < length; e+= blockDim.x * gridDim.x) {
int idx = (int) ((dx[e] - min_val) / binSize);
if (idx < 0) idx = 0;
else if (idx >= numBins) idx = numBins - 1;
nd4j::math::atomics::nd4j_atomicAdd(&bins[idx], (T) 1.0f);
}
__syncthreads();
// transfer shared memory to reduction memory
if (gridDim.x > 1) {
unsigned int *tc = (unsigned int *)reductionPointer;
__shared__ bool amLast;
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
reductor[e] = bins[e];
}
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
// ticket counter: the block that draws gridDim.x - 1 is the last one done
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
// reset the ticket so the scratch slot can be reused by later launches
tc[16384] = 0;
// nullify shared memory for future accumulation
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
bins[e] = (T) 0.0f;
}
// accumulate reduced bins
for (int r = 0; r < gridDim.x; r++) {
T *ptrBuf = ((T *)allocationPointer) + (r * numBins);
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
bins[e] += ptrBuf[e];
}
}
__syncthreads();
// write them out to Z
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
result[e] = bins[e];
}
}
} else {
// if there's only 1 block - just write away data
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
result[e] = bins[e];
}
}
};
#endif
// Host-side histogram.
// extraParams layout: [0] = number of bins, [1] = min value, [2] = max value.
// Each OMP thread fills a private histogram over its span, then merges it
// into result under a critical section.
// NOTE(review): bins are accumulated into result ("result[x] += ..."), so
// this assumes result was zero-initialized by the caller — confirm.
static void execSpecial(
T *dx,
Nd4jLong *xShapeBuffer,
T *result,
Nd4jLong *resultShapeBuffer,
T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
int length = shape::length(xShapeBuffer);
int _threads = 2;
int numBins = (int) extraParams[0];
// manual work split: each thread takes a contiguous span of the input
int span = (length / _threads) + 8;
// get min over input
T min_val = extraParams[1];
T max_val = extraParams[2];
/*
#pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(min:min_val) proc_bind(close)
for (int x = 0; x < length; x++) {
if (min_val > dx[x])
min_val = dx[x];
}
// get max over input
T max_val = (T) MIN_FLOAT;
#pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(max:max_val) proc_bind(close)
for (int x = 0; x < length; x++) {
if (max_val < dx[x])
max_val = dx[x];
}
*/
T binSize = (max_val - min_val) / (numBins);
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(close) default(shared)
{
int tid, start, end;
// private per-thread histogram to avoid contention on result
int *bins = new int[numBins];
std::memset(bins, 0, sizeof(int) * numBins);
tid = omp_get_thread_num();
start = span * tid;
end = span * (tid + 1);
if (end > length) end = length;
#pragma omp simd
for (int x = start; x < end; x++) {
// out-of-range values are clamped into the first/last bin
int idx = (int) ((dx[x] - min_val) / binSize);
if (idx < 0)
idx = 0;
else if (idx >= numBins)
idx = numBins - 1;
bins[idx]++;
}
// merge private histograms one thread at a time
#pragma omp critical
{
#pragma omp simd
for (int x = 0; x < numBins; x++) {
result[x] += bins[x];
}
}
delete[] bins;
}
}
// identity per-element op; unused since requiresSpecial == true
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class Col2Im {
public:
    static const bool requiresSpecial = true;

#ifdef __CUDACC__
    /**
     * Device-side col2im: accumulates the 6d "columns" tensor
     * [bS, iC, kH, kW, oH, oW] back into the 4d image [bS, iC, iH, iW];
     * overlapping patch contributions are summed.
     * Ported from https://github.com/pjreddie/darknet/blob/master/src/col2im_kernels.cu
     */
    static inline __device__ void execSpecialCuda(
            T *dx,
            Nd4jLong *xShapeBuffer,
            T *result,
            Nd4jLong *resultShapeBuffer,
            T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        auto inShape = shape::shapeOf(xShapeBuffer);
        auto inStride = shape::stride(xShapeBuffer);

        int strideex = inStride[0];
        int stridech = inStride[1];
        int stridekrow = inStride[2];
        int stridekcol = inStride[3];
        int striderow = inStride[4];
        int stridecol = inStride[5];

        int kernelHeight = inShape[2];
        int kernelWidth = inShape[3];

        // extraParams layout: sY, sX, pH, pW, imgH, imgW, dY, dX
        int strideY = (int)extraParams[0];
        int strideX = (int)extraParams[1];
        int padHeight = (int)extraParams[2];
        int padWidth = (int)extraParams[3];
        int imgHeight = (int)extraParams[4];
        int imgWidth = (int)extraParams[5];
        int dY = (int)extraParams[6];   //Dilation in height/y dimension
        int dX = (int)extraParams[7];   //Dilation in width/x dimension

        auto outShape = shape::shapeOf(resultShapeBuffer);
        auto resultOrder = shape::order(resultShapeBuffer);
        auto outStride = shape::stride(resultShapeBuffer);

        int samples = outShape[0];
        int depth = outShape[1];
        int imgH = outShape[2];
        int imgW = outShape[3];

        int height_col = inShape[4];    //(imgHeight + 2 * padHeight - kernelHeight) / strideX + 1;
        int width_col = inShape[5];     //(imgWidth + 2 * padWidth - kernelWidth) / strideY + 1;

        // one iteration per output image pixel
        int n = samples * depth * imgHeight * imgWidth;

        //Effective kernel size, accounting for dilation
        int kEffectiveW = kernelWidth + (kernelWidth - 1) * (dX - 1);
        int kEffectiveH = kernelHeight + (kernelHeight - 1) * (dY - 1);

        for (int i = (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
            T val = 0;
            int w_im = i % imgWidth + padWidth;
            int h_im = (i / imgWidth) % imgHeight + padHeight;
            int c_im = i / (imgWidth * imgHeight);

            int num_im = c_im / depth;
            int depth_im = c_im % depth;

            // compute the start and end of the output:
            // the range of (h_col, w_col) positions in the 6d col matrix
            // whose patches could have covered this image pixel
            int w_col_start = (w_im < kEffectiveW) ? 0 : (w_im - kEffectiveW) / strideX + 1;
            int w_col_end = nd4j::math::nd4j_min<int>(w_im / strideX + 1, width_col);
            int h_col_start = (h_im < kEffectiveH) ? 0 : (h_im - kEffectiveH) / strideY + 1;
            int h_col_end = nd4j::math::nd4j_min<int>(h_im / strideY + 1, height_col);

            //Iterate over col entries in the 6d array... these are added up
            for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
                for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
                    int h_k = (h_im - h_col * strideY);
                    int w_k = (w_im - w_col * strideX);

                    // only kernel taps that land exactly on the dilation grid contribute
                    if (h_k % dY == 0 && w_k % dX == 0) {
                        h_k /= dY;
                        w_k /= dX;

                        int data_col_index = num_im * strideex + depth_im * stridech + h_k * stridekrow + w_k * stridekcol + h_col * striderow + w_col * stridecol;
                        val += dx[data_col_index];
                    }
                }
            }

            // translate the linear index i into an offset honoring output strides
            int i_f = 0;
            int i_c = i;
            for (int dim = 3; dim >= 0; dim--) {
                i_f += (i_c % outShape[dim]) * outStride[dim];
                i_c = i_c / outShape[dim];
            }

            result[i_f] = val;
        }
    }
#endif

    /**
     * Host-side col2im. dx is the 6d "columns" array [bS, iC, kH, kW, oH, oW];
     * result is the 4d image [bS, iC, iH, iW], zeroed here and then accumulated
     * into (overlapping patches sum). extraParams: sH, sW, pH, pW, iH, iW, dH, dW.
     */
    static void execSpecial(
            T *dx,
            Nd4jLong *xShapeBuffer,
            T *result,
            Nd4jLong *resultShapeBuffer,
            T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        const Nd4jLong *inShape = shape::shapeOf(xShapeBuffer);
        const Nd4jLong *inStride = shape::stride(xShapeBuffer);
        // BUG FIX: outShape was previously read from xShapeBuffer; only dims 0/1
        // (bS, iC) were used so it happened to work, but the correct source is
        // the result shape buffer — matching the CUDA kernel above.
        const Nd4jLong *outShape = shape::shapeOf(resultShapeBuffer);
        const Nd4jLong *outStride = shape::stride(resultShapeBuffer);

        const int kH = inShape[2];
        const int kW = inShape[3];
        const int bS = outShape[0];
        const int iC = outShape[1];
        const int oH = inShape[4];
        const int oW = inShape[5];

        const int sH = (int)extraParams[0];
        const int sW = (int)extraParams[1];
        const int pH = (int)extraParams[2];
        const int pW = (int)extraParams[3];
        const int iH = (int)extraParams[4];
        const int iW = (int)extraParams[5];
        const int dH = (int)extraParams[6];
        const int dW = (int)extraParams[7];

        const int inStride0 = inStride[0];
        const int inStride1 = inStride[1];
        const int inStride2 = inStride[2];
        const int inStride3 = inStride[3];
        const int inStride4 = inStride[4];
        const int inStride5 = inStride[5];
        const int outStride0 = outStride[0];
        const int outStride1 = outStride[1];
        const int outStride2 = outStride[2];
        const int outStride3 = outStride[3];

        // distance covered by one full row of output columns in the col buffer
        const int inStepOW = oW * inStride5;

        int inRowStart, inColStart, inRow, inCol;
        T *out0, *out1, *out2;

        // the output accumulates, so it must start from zero
        memset(result, 0, shape::length(resultShapeBuffer) * sizeof(T));

        if (shape::order(xShapeBuffer) == 'c' && shape::order(resultShapeBuffer) == 'c' && shape::strideDescendingCAscendingF(xShapeBuffer) && shape::strideDescendingCAscendingF(resultShapeBuffer)) {
            // fast path: both buffers are canonical c-order, so the col buffer
            // can be walked with a single moving pointer per batch entry
            #pragma omp parallel for schedule(guided) proc_bind(close) private(out0, out1, out2, inRowStart, inColStart, inRow, inCol)
            for (int b = 0; b < bS; b++) {
                T *input = dx + (b * inStride0);
                out0 = result + (b * outStride0);

                for (int channel = 0; channel < iC; ++channel, out0 += outStride1) {
                    for (int kRow = 0; kRow < kH; ++kRow) {
                        inRowStart = -pH + kRow * dH;

                        for (int kCol = 0; kCol < kW; ++kCol) {
                            inRow = inRowStart;
                            inColStart = -pW + kCol * dW;

                            for (int outRow = 0; outRow < oH; ++outRow, inRow += sH) {
                                if (!is_a_ge_zero_and_a_lt_b(inRow, iH)) {
                                    // whole row is in the padding area: skip it in the col buffer
                                    input += inStepOW;
                                }
                                else {
                                    inCol = inColStart;
                                    out1 = out0 + inRow * outStride2;

                                    for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, input += inStride5) {
                                        if (is_a_ge_zero_and_a_lt_b(inCol, iW)) {
                                            out2 = out1 + inCol * outStride3;
                                            *out2 += *input;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        else {
            // generic path: honors arbitrary strides on the col buffer
            T *in0, *in1, *in2, *in3, *in4;

            #pragma omp parallel for schedule(guided) proc_bind(close) private(in0, in1, in2, in3, in4, out0, out1, out2, inRowStart, inColStart, inRow, inCol)
            for (int b = 0; b < bS; b++) {
                out0 = result + (b * outStride0);
                in0 = dx + b * inStride0;

                for (int channel = 0; channel < iC; ++channel, out0 += outStride1, in0 += inStride1) {
                    in1 = in0;
                    for (int kRow = 0; kRow < kH; ++kRow, in1 += inStride2) {
                        in2 = in1;
                        inRowStart = -pH + kRow * dH;

                        for (int kCol = 0; kCol < kW; ++kCol, in2 += inStride3) {
                            in3 = in2;
                            inRow = inRowStart;
                            inColStart = -pW + kCol * dW;

                            for (int outRow = 0; outRow < oH; ++outRow, inRow += sH, in3 += inStride4) {
                                in4 = in3;

                                if (!is_a_ge_zero_and_a_lt_b(inRow, iH)) {
                                    // row in padding: nothing to accumulate
                                    // (in4 is re-derived from in3 next iteration anyway)
                                    in4 += inStepOW;
                                }
                                else {
                                    inCol = inColStart;
                                    out1 = out0 + inRow * outStride2;

                                    for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, in4 += inStride5) {
                                        if (is_a_ge_zero_and_a_lt_b(inCol, iW)) {
                                            out2 = out1 + inCol * outStride3;
                                            *out2 += *in4;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    // identity per-element op; unused since requiresSpecial == true
    op_def static T op(T d1, T *params) {
        return d1;
    }

    /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc
     * normally negative indices are bad, OK here because of other checks on input indices
     * Uses unrolled loop specifically for length 4
     */
    static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) {
        int offset = baseOffset;
        if (shape[0] != 1) offset += indices[0] * stride[0];
        if (shape[1] != 1) offset += indices[1] * stride[1];
        if (shape[2] != 1) offset += indices[2] * stride[2];
        if (shape[3] != 1) offset += indices[3] * stride[3];
        return offset;
    }

    /** A version of Shape.getOffset without checking on input for negative indices etc
     * normally negative indices are bad, OK here because of other checks on input indices
     * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here)
     */
    static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) {
        int offset = baseOffset;
        if (shape[0] != 1) offset += indices[0] * stride[0];
        if (shape[1] != 1) offset += indices[1] * stride[1];
        if (shape[4] != 1) offset += indices[4] * stride[4];
        if (shape[5] != 1) offset += indices[5] * stride[5];
        return offset;
    }
};
template<typename T>
class Reverse {
public:
static const bool requiresSpecial = true;
#ifdef __CUDACC__
static inline __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *zShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
__shared__ Nd4jLong xLength;
__shared__ int xEWS;
__shared__ char xOrder;
__shared__ Nd4jLong sLength;
__shared__ T *shmem;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIdx.x == 0) {
xLength = shape::length(xShapeBuffer);
xEWS = shape::elementWiseStride(xShapeBuffer);
xOrder = shape::order(xShapeBuffer);
sLength = xLength - 1;
extern __shared__ unsigned char shrd[];
shmem = (T *) shrd;
}
__syncthreads();
if (dx == result) {
if (xEWS == 1) {
for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) {
Nd4jLong idx = sLength - e;
T tmp = dx[e];
dx[e] = dx[idx];
dx[idx] = tmp;
}
} else if (xEWS >= 1) {
for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) {
Nd4jLong idx1 = (sLength - e) * xEWS;
Nd4jLong idx2 = e * xEWS;
T tmp = dx[idx2];
dx[idx2] = dx[idx1];
dx[idx1] = tmp;
}
} else {
__shared__ int xRank;
__shared__ Nd4jLong *xShape;
__shared__ Nd4jLong *xStride;
if (threadIdx.x == 0) {
xRank = shape::rank(xShapeBuffer);
xShape = shape::shapeOf(xShapeBuffer);
xStride = shape::stride(xShapeBuffer);
}
__syncthreads();
Nd4jLong xCoord[MAX_RANK];
Nd4jLong zCoord[MAX_RANK];
for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) {
if (xOrder == 'c') {
shape::ind2subC(xRank, xShape, e, xCoord);
shape::ind2subC(xRank, xShape, sLength - e, zCoord);
} else {
shape::ind2sub(xRank, xShape, e, xCoord);
shape::ind2sub(xRank, xShape, sLength - e, zCoord);
}
auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank);
result[zOffset] = dx[xOffset];
}
}
} else {
__shared__ int zEWS;
__shared__ char zOrder;
if (threadIdx.x == 0) {
zEWS = shape::elementWiseStride(zShapeBuffer);
zOrder = shape::order(zShapeBuffer);
}
__syncthreads();
if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) {
// loop for whole array
for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) {
result[sLength - e] = dx[e];
}
} else if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) {
for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) {
result[(sLength - e) * zEWS] = dx[e * xEWS];
}
} else {
__shared__ int xRank;
__shared__ Nd4jLong *xShape;
__shared__ Nd4jLong *xStride;
__shared__ int zRank;
__shared__ Nd4jLong *zShape;
__shared__ Nd4jLong *zStride;
if (threadIdx.x == 0) {
xRank = shape::rank(xShapeBuffer);
xShape = shape::shapeOf(xShapeBuffer);
xStride = shape::stride(xShapeBuffer);
zRank = shape::rank(zShapeBuffer);
zShape = shape::shapeOf(zShapeBuffer);
zStride = shape::stride(zShapeBuffer);
}
__syncthreads();
Nd4jLong xCoord[MAX_RANK];
Nd4jLong zCoord[MAX_RANK];
for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) {
if (xOrder == 'c') {
shape::ind2subC(xRank, xShape, e, xCoord);
shape::ind2subC(xRank, xShape, sLength - e, zCoord);
} else {
shape::ind2sub(xRank, xShape, e, xCoord);
shape::ind2sub(xRank, xShape, sLength - e, zCoord);
}
auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank);
result[zOffset] = dx[xOffset];
}
}
}
}
#endif
static void execSpecial(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *zShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto xLength = shape::length(xShapeBuffer);
auto xEWS = shape::elementWiseStride(xShapeBuffer);
auto xOrder = shape::order(xShapeBuffer);
auto sLength = xLength - 1;
// two step phase here
if (dx == result) {
if (xEWS == 1) {
#pragma omp parallel for schedule(guided)
for (Nd4jLong e = 0; e < xLength / 2; e++) {
Nd4jLong idx = sLength - e;
T tmp = dx[e];
dx[e] = dx[idx];
dx[idx] = tmp;
}
} else if (xEWS > 1) {
#pragma omp parallel for schedule(guided)
for (Nd4jLong e = 0; e < xLength / 2; e++) {
Nd4jLong idx1 = (sLength - e) * xEWS;
Nd4jLong idx2 = e * xEWS;
T tmp = dx[idx2];
dx[idx2] = dx[idx1];
dx[idx1] = tmp;
}
} else {
int xRank = shape::rank(xShapeBuffer);
auto xShape = shape::shapeOf(xShapeBuffer);
auto xStride = shape::stride(xShapeBuffer);
Nd4jLong xCoord[MAX_RANK];
Nd4jLong zCoord[MAX_RANK];
#pragma omp parallel for private(xCoord, zCoord) schedule(guided)
for (Nd4jLong e = 0; e < xLength / 2; e++) {
if (xOrder == 'c') {
shape::ind2subC(xRank, xShape, e, xCoord);
shape::ind2subC(xRank, xShape, sLength - e, zCoord);
} else {
shape::ind2sub(xRank, xShape, e, xCoord);
shape::ind2sub(xRank, xShape, sLength - e, zCoord);
}
auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank);
result[zOffset] = dx[xOffset];
}
}
} else {
// single step phase here
auto zEWS = shape::elementWiseStride(zShapeBuffer);
auto zOrder = shape::order(zShapeBuffer);
if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) {
#pragma omp parallel for schedule(guided)
for (Nd4jLong e = 0; e < xLength; e++) {
result[sLength - e] = dx[e];
}
} else if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) {
#pragma omp parallel for schedule(guided)
for (Nd4jLong e = 0; e < xLength; e++) {
result[(sLength - e) * zEWS] = dx[e * xEWS];
}
} else {
auto xRank = shape::rank(xShapeBuffer);
auto xShape = shape::shapeOf(xShapeBuffer);
auto xStride = shape::stride(xShapeBuffer);
auto zRank = shape::rank(zShapeBuffer);
auto zShape = shape::shapeOf(zShapeBuffer);
auto zStride = shape::stride(zShapeBuffer);
Nd4jLong xCoord[MAX_RANK];
Nd4jLong zCoord[MAX_RANK];
#pragma omp parallel for private(xCoord, zCoord) schedule(guided)
for (Nd4jLong e = 0; e < xLength; e++) {
if (xOrder == 'c')
shape::ind2subC(xRank, xShape, e, xCoord);
else
shape::ind2sub(xRank, xShape, e, xCoord);
if (zOrder == 'c')
shape::ind2subC(zRank, zShape, (sLength - e), zCoord);
else
shape::ind2sub(zRank, zShape, (sLength - e), zCoord);
auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
auto zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank);
result[zOffset] = dx[xOffset];
}
}
}
}
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class SoftMax {
public:
static const bool requiresSpecial = true;
#ifdef __CUDACC__
/**
* Device-side softmax built by chaining existing CUDA primitives:
* max-reduce -> subtract -> exp -> sum-reduce -> divide.
* Subtracting the row max first keeps exp() numerically stable.
*/
static inline __device__ void execSpecialCuda(
T *dx,
Nd4jLong *xShapeBuffer,
T *result,
Nd4jLong *resultShapeBuffer,
T *extraParams,
int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto shape = shape::shapeOf(xShapeBuffer);
__shared__ T maxResult;
__shared__ Nd4jLong *maxResultShapeBuffer;
auto length = shape::length(xShapeBuffer);
auto stride = shape::stride(xShapeBuffer);
//compute the row wise maxes
__shared__ Nd4jLong maxShape[2];
// it's always 2d here
__shared__ Nd4jLong tempBuffer[8];
if (threadIdx.x == 0) {
maxResult = (T) 0.0;
maxShape[0] = shape[0];
maxShape[1] = 1;
// build the [rows, 1] shape descriptor in shared scratch space
maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer);
}
__syncthreads();
functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
__syncthreads();
//subtract max of each row
functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
__syncthreads();
//after subtracting the row wise maxes take the exp
functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
__syncthreads();
//take the sum for the exponential
functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
__syncthreads();
//divide by the sum
functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
}
#endif
/**
* Host-side softmax.
* Matrix input: row-wise softmax via reduce/broadcast/transform helpers.
* Vector input: hand-rolled three passes (max, exp+sum, divide).
* NOTE(review): the vector branch writes nothing when either element-wise
* stride is < 1 — presumably callers guarantee EWS >= 1; confirm.
*/
static void execSpecial(
T *dx,
Nd4jLong *xShapeBuffer,
T *result,
Nd4jLong *resultShapeBuffer,
T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
if (shape::isMatrix(xShapeBuffer)) {
auto shape = shape::shapeOf(xShapeBuffer);
//iterate along rows
int dimension[1] = { 0 };
int maxDimension[1] = { 1 };
//compute the row wise maxes
std::vector<T> maxResult(shape[0]);
for (int i = 0; i < shape[0]; i++)
maxResult[i] = 0.0;
// [rows, 1] descriptor for the per-row reduction results
Nd4jLong maxShape[2] = { shape[0], 1 };
auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape);
functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1,
nullptr, nullptr);
//subtract max of each row
functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(dx, xShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1,
nullptr, nullptr, nullptr, nullptr);
//after subtracting the row wise maxes take the exp
functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets);
//take the sum for the exponential
functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1,
nullptr, nullptr);
//divide by the sum
functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1,
nullptr, nullptr, nullptr, nullptr);
delete[] maxResultShapeBuffer;
}
else if (shape::isVector(xShapeBuffer)) {
T max = -FLOAT_MAX_VALUE;
T sum = 0;
int elementWiseStride = shape::elementWiseStride(xShapeBuffer);
int resultElementWiseStride = shape::elementWiseStride(resultShapeBuffer);
int length = shape::length(xShapeBuffer);
if (elementWiseStride >= 1 && resultElementWiseStride >= 1) {
if (elementWiseStride == 1 && resultElementWiseStride == 1) {
// pass 1: global max (numerical stability)
#pragma omp simd reduction(maxT:max)
for (int i = 0; i < length; i++) {
max = nd4j::math::nd4j_max<T>(max, dx[i]);
}
// pass 2: shifted exponentials and their sum
#pragma omp parallel for simd reduction(sumT:sum)
for (int i = 0; i < length; i++) {
result[i] = nd4j::math::nd4j_exp<T>(dx[i] - max);
sum += result[i];
}
// pass 3: normalize
#pragma omp simd
for (int i = 0; i < length; i++) {
result[i] /= sum;
}
}
else {
// same three passes, honoring the element-wise strides
#pragma omp simd reduction(maxT:max)
for (int i = 0; i < length; i++) {
max = nd4j::math::nd4j_max<T>(max, dx[i * elementWiseStride]);
}
#pragma omp parallel for simd reduction(sumT:sum)
for (int i = 0; i < length; i++) {
T r = nd4j::math::nd4j_exp<T>(dx[i * elementWiseStride] - max);
result[i * resultElementWiseStride] = r;
sum += r;
}
#pragma omp simd
for (int i = 0; i < length; i++) {
result[i * resultElementWiseStride] /= sum;
}
}
}
}
}
/**
* Per-element op. NOTE(review): returns softplus(d1), not a softmax value;
* requiresSpecial == true so the exec paths use execSpecial instead —
* this mirrors the upstream implementation.
*/
op_def static T op(T d1, T *params) {
return nd4j::math::softplus<T>(d1);
}
};
template<typename T>
class LogSoftMax {
public:
    static const bool requiresSpecial = true;

#ifdef __CUDACC__
    /**
     * Device-side log-softmax, chained from existing CUDA primitives:
     * max-reduce -> subtract -> exp -> sum-reduce -> divide -> log.
     * Subtracting the row max first keeps exp() numerically stable.
     */
    static inline __device__ void execSpecialCuda(
            T *dx,
            Nd4jLong *xShapeBuffer,
            T *result,
            Nd4jLong *resultShapeBuffer,
            T *extraParams,
            int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        auto shape = shape::shapeOf(xShapeBuffer);
        auto stride = shape::stride(xShapeBuffer);
        //iterate along rows
        __shared__ T maxResult;
        __shared__ Nd4jLong *maxResultShapeBuffer;
        if (threadIdx.x == 0) {
            maxResult = (T) 0.0;
        }
        __syncthreads();

        //compute the row wise maxes
        Nd4jLong maxShape[2] = { shape[0], 1 };
        __shared__ Nd4jLong tempBuffer[8];
        if (threadIdx.x == 0)
            maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer);
        __syncthreads();

        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();

        //subtract max of each row
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();

        //after subtracting the row wise maxes take the exp
        functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
        __syncthreads();

        //take the sum for the exponential
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();

        //divide by the sum
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();

        functions::transform::Transform<T>::template transformCuda<simdOps::Log<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
    }
#endif

    /**
     * Host-side log-softmax.
     * Matrix input: row-wise softmax via reduce/broadcast/transform helpers,
     * followed by an element-wise log.
     * Vector input: hand-rolled three passes (max, exp+sum, divide+log).
     */
    static void execSpecial(
            T *dx,
            Nd4jLong *xShapeBuffer,
            T *result,
            Nd4jLong *resultShapeBuffer,
            T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        if (shape::isMatrix(xShapeBuffer, 2)) {
            auto shape = shape::shapeOf(xShapeBuffer);
            //iterate along rows
            int dimension[1] = { 0 };
            int maxDimension[1] = { 1 };
            //compute the row wise maxes
            std::vector <T> maxResult(shape[0]);
            #pragma omp simd
            for (int i = 0; i < shape[0]; i++)
                maxResult[i] = 0.0;

            // [rows, 1] descriptor for the per-row reduction results
            Nd4jLong maxShape[2] = { shape[0], 1 };
            auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape);
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1,
                    nullptr, nullptr);

            //subtract max of each row
            functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(dx, xShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1,
                    nullptr, nullptr, nullptr, nullptr);

            //after subtracting the row wise maxes take the exp
            functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets);

            //take the sum for the exponential
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1,
                    nullptr, nullptr);

            //divide by the sum
            functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1,
                    nullptr, nullptr, nullptr, nullptr);

            functions::transform::Transform<T>::template exec<simdOps::Log<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets);

            delete[] maxResultShapeBuffer;
        }
        else if (shape::isVector(xShapeBuffer, 2)) {
            T max = -FLOAT_MAX_VALUE;
            T sum = 0;

            auto elementWiseStride = shape::elementWiseStride(xShapeBuffer);
            auto length = shape::length(xShapeBuffer);

            if (elementWiseStride == 1) {
                // BUG FIX: the max was previously computed over `result` (the
                // output buffer, uninitialized when dx != result); read the
                // input `dx` instead, matching the SoftMax implementation above.
                #pragma omp simd reduction(maxT:max)
                for (int i = 0; i < length; i++) {
                    max = nd4j::math::nd4j_max<T>(max, dx[i]);
                }

                // shifted exponentials and their sum
                #pragma omp simd reduction(sumT:sum)
                for (int i = 0; i < length; i++) {
                    result[i] = nd4j::math::nd4j_exp<T>(dx[i] - max);
                    sum += result[i];
                }

                // normalize, then take the log
                #pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i] /= sum;
                    result[i] = nd4j::math::nd4j_log<T>(result[i]);
                }
            }
            else if (elementWiseStride > 1) {
                // same three passes, honoring the element-wise stride
                // BUG FIX: max is computed over `dx` here as well (was `result`).
                #pragma omp simd reduction(maxT:max)
                for (int i = 0; i < length; i++) {
                    max = nd4j::math::nd4j_max<T>(max, dx[i * elementWiseStride]);
                }

                #pragma omp simd reduction(sumT:sum)
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] = nd4j::math::nd4j_exp<T>(dx[i * elementWiseStride] - max);
                    sum += result[i * elementWiseStride];
                }

                #pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] /= sum;
                    result[i * elementWiseStride] = nd4j::math::nd4j_log<T>(result[i * elementWiseStride]);
                }
            }
        }
    }

    /**
     * Per-element op. NOTE(review): returns softplus(d1); requiresSpecial is
     * true so the exec paths use execSpecial — mirrors the upstream code.
     */
    op_def static T op(T d1, T *params) {
        return nd4j::math::softplus<T>(d1);
    }
};
/**
* Derivative of softmax: computes s = softmax(x), then s * (1 - s) element-wise
*/
template<typename T>
class SoftMaxDerivative {
public:
static const bool requiresSpecial = true;
#ifdef __CUDACC__
/**
* Device-side softmax derivative: computes softmax via the usual
* max-reduce -> subtract -> exp -> sum-reduce -> divide chain, then applies
* s * (1 - s) element-wise. The result buffer must have an element-wise
* stride >= 1; other layouts are not supported (a message is printed).
*/
static inline __device__ void execSpecialCuda(
T *dx,
Nd4jLong *xShapeBuffer,
T *result,
Nd4jLong *resultShapeBuffer,
T *extraParams,
int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto shape = shape::shapeOf(xShapeBuffer);
__shared__ T maxResult;
__shared__ Nd4jLong *maxResultShapeBuffer;
__shared__ Nd4jLong resultEWS;
auto length = shape::length(xShapeBuffer);
if (threadIdx.x == 0) {
resultEWS = shape::elementWiseStride(resultShapeBuffer);
maxResult = (T) 0.0;
}
__syncthreads();
auto tride = shape::stride(xShapeBuffer);
// [rows, 1] descriptor for the per-row reduction results
Nd4jLong maxShape[2] = { shape[0], 1 };
__shared__ Nd4jLong tempBuffer[8];
if (threadIdx.x == 0)
maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer);
__syncthreads();
functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
__syncthreads();
//subtract max of each row
functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
__syncthreads();
//after subtracting the row wise maxes take the exp
functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
__syncthreads();
//take the sum for the exponential
functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
__syncthreads();
//divide by the sum
functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
__syncthreads();
if (resultEWS >= 1) {
// apply the derivative: s * (1 - s)
for (int i = threadIdx.x; i < length; i += blockDim.x) {
result[i * resultEWS] = result[i * resultEWS] * ((T) 1.0 - result[i * resultEWS]);
}
}
else {
printf("Non element wise stride not supported right now\n");
}
}
#endif
// CPU implementation of the softmax-derivative "special" transform:
// computes s = softmax(x) row-wise (matrix) or whole-buffer (vector), then
// the derivative s * (1 - s), written into `result` in place.
// NOTE(review): both paths below read from `result` when computing the max /
// subtracting it -- this appears to assume the caller has already copied dx
// into result (or that they alias); confirm at the call sites.
static void execSpecial(
        T *dx,                       // input buffer
        Nd4jLong *xShapeBuffer,      // input shape info
        T *result,                   // output buffer (also used as scratch)
        Nd4jLong *resultShapeBuffer, // output shape info
        T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
    if (shape::isMatrix(xShapeBuffer, 2)) {
        auto shape = shape::shapeOf(xShapeBuffer);
        auto resultEleStide = shape::elementWiseStride(resultShapeBuffer);

        //iterate along rows: dimension 0 = rows, maxDimension 1 = reduce over columns
        int dimension[1] = { 0 };
        int maxDimension[1] = { 1 };
        auto len = shape::length(xShapeBuffer);

        //compute the row wise maxes; maxResult is reused below for the row sums
        std::vector <T> maxResult(shape[0]);
#pragma omp simd
        for (int i = 0; i < shape[0]; i++)
            maxResult[i] = 0.0;

        // shape of the per-row reduction output: a single column [shape[0], 1]
        Nd4jLong maxShape[2] = { shape[0], 1 };
        auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape);
        functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1,
                                                                             nullptr, nullptr);

        //subtract max of each row (standard trick for numerically stable exp)
        functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1,
                                                                                nullptr, nullptr, nullptr, nullptr);

        //after subtracting the row wise maxes take the exp
        functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets);

        //take the sum for the exponential (written back into maxResult)
        functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension,
                                                                             1, nullptr, nullptr);

        //divide by the sum -> result now holds softmax(x)
        functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);

        if (resultEleStide >= 1) {
            // elementwise derivative s * (1 - s), honoring the output stride
            if (resultEleStide == 1) {
#pragma omp simd
                for (int i = 0; i < len; i++) {
                    result[i] = result[i] * ((T) 1.0f - result[i]);
                }
            }
            else {
#pragma omp simd
                for (int i = 0; i < len; i++) {
                    result[i * resultEleStide] = result[i * resultEleStide] * ((T) 1.0f - result[i * resultEleStide]);
                }
            }
        }
        else {
            // no usable elementwise stride: iterate by coordinates instead
            auto zShape = shape::shapeOf(resultShapeBuffer);
            auto zStride = shape::stride(resultShapeBuffer);
            auto zRank = shape::rank(resultShapeBuffer);

            Nd4jLong zCoord[MAX_RANK];
            for (int i = 0; i < len; i++) {
                shape::ind2subC(zRank,zShape, i, zCoord);
                Nd4jLong zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank);
                result[zOffset] = result[zOffset] * ((T) 1.0f - result[zOffset]);
            }
        }

        // shape::shapeBuffer allocated this buffer; release it here
        delete[] maxResultShapeBuffer;
    }
    else if (shape::isVector(xShapeBuffer, 2)) {
        T max = -FLOAT_MAX_VALUE;
        T sum = 0;

        auto elementWiseStride = shape::elementWiseStride(xShapeBuffer);
        auto length = shape::length(xShapeBuffer);
        if (elementWiseStride == 1) {
            // maxT / sumT are custom OpenMP reduction identifiers declared
            // elsewhere in the project -- TODO confirm they are in scope here
#pragma omp simd reduction(maxT:max)
            for (int i = 0; i < length; i++) {
                max = nd4j::math::nd4j_max<T>(max, result[i]);
            }

            // shift by max, exponentiate, and accumulate the normalizer
#pragma omp simd reduction(sumT:sum)
            for (int i = 0; i < length; i++) {
                result[i] -= max;
                result[i] = nd4j::math::nd4j_exp<T>(result[i]);
                sum += result[i];
            }

            // normalize -> softmax
#pragma omp simd
            for (int i = 0; i < length; i++) {
                result[i] /= sum;
            }

            // derivative: s * (1 - s)
#pragma omp simd
            for (int i = 0; i < length; i++) {
                result[i] = result[i] * ((T) 1.0f - result[i]);
            }
        } else if (elementWiseStride >= 1) {
            // same four passes as above, honoring the elementwise stride
#pragma omp simd reduction(maxT:max)
            for (int i = 0; i < length; i++) {
                max = nd4j::math::nd4j_max<T>(max, result[i * elementWiseStride]);
            }

#pragma omp simd reduction(sumT:sum)
            for (int i = 0; i < length; i++) {
                result[i * elementWiseStride] -= max;
                result[i * elementWiseStride] = nd4j::math::nd4j_exp<T>(result[i * elementWiseStride]);
                sum += result[i * elementWiseStride];
            }

#pragma omp simd
            for (int i = 0; i < length; i++) {
                result[i * elementWiseStride] /= sum;
            }

#pragma omp simd
            for (int i = 0; i < length; i++) {
                result[i * elementWiseStride] = result[i * elementWiseStride] * ((T) 1.0f - result[i * elementWiseStride]);
            }
        } else {
            // negative / unknown stride path was never implemented
            printf("non-ews access on row not implemented yet");
        }
    }
}
// Scalar fallback transform. The softmax-derivative path normally runs via
// execSpecial; this elementwise variant mirrors the softplus activation.
op_def static T op(T d1, T *params) {
    const T activated = nd4j::math::softplus<T>(d1);
    return activated;
}
};
/**
 * IsMax: one-hot argmax transform. Writes 1.0 at the position of the maximum
 * element (of the whole buffer, or of each TAD along the requested
 * dimensions) and 0.0 everywhere else.
 *
 * BUGFIX: the shape::isVector() branch of execSpecial allocated
 * `dimension` with new[] but never freed it (the general TAD branch did);
 * the missing delete[] has been added.
 */
template<typename T>
class IsMax {
public:
    // routed through execSpecial / execSpecialCuda, never through op()
    static const bool requiresSpecial = true;

#ifdef __CUDACC__
    static inline __device__ void doAllCuda(
            T *dx,
            Nd4jLong *xShapeBuffer,
            T *result,
            Nd4jLong *resultShapeBuffer,
            T *extraParams,
            int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager) {
        // Intentionally empty: the previous device-side implementation was
        // commented out as dead code ("safe to delete, it's never used").
    }
#endif

#ifdef __CUDACC__
    inline __host__
#elif defined(__GNUC__)
#endif
    // Host-side one-hot argmax over the whole buffer.
    static void doAll(
            T *dx,
            Nd4jLong *xShapeBuffer,
            T *result,
            Nd4jLong *resultShapeBuffer,
            T *extraParams) {

        auto length = shape::length(xShapeBuffer);
        auto eleStride = shape::elementWiseStride(xShapeBuffer);
        auto resultEleStride = shape::elementWiseStride(resultShapeBuffer);
        auto xOrder = shape::order(xShapeBuffer);
        auto resultOrder = shape::order(resultShapeBuffer);

        if (xOrder == resultOrder && xOrder == 'c') {
            if (eleStride == 1 && resultEleStride == 1) {
                if (length < ELEMENT_THRESHOLD) {
                    // small array: single-threaded scan, zero as we go
                    int maxIdx = 0;
                    T currMax = dx[0];
                    for (int i = 0; i < length; i++) {
                        if (currMax < dx[i]) {
                            currMax = dx[i];
                            maxIdx = i;
                        }
                        result[i] = 0.0;
                    }
                    result[maxIdx] = 1.0;
                }
                else {
                    int maxIdx = 0;
                    T currMax = dx[0];
                    // NOTE(review): every thread scans the full range (parallel
                    // region without worksharing); thread-local winners are
                    // merged in the critical section below.
#pragma omp parallel proc_bind(AFFINITY)
                    {
                        int maxIdxLocal = maxIdx;
                        T currMaxLocal = currMax;
                        for (int i = 0; i < length; i++) {
                            if (currMaxLocal < dx[i]) {
                                currMaxLocal = dx[i];
                                maxIdxLocal = i;
                            }
                            result[i] = 0.0;
                        }
#pragma omp critical
                        {
                            if (currMax < currMaxLocal) {
                                currMax = currMaxLocal;
                                maxIdx = maxIdxLocal;
                            }
                        }
                    }
                    result[maxIdx] = 1.0;
                }
            }
            else {
                // same as above but honoring the element-wise strides
                if (length < ELEMENT_THRESHOLD) {
                    int maxIdx = 0;
                    T currMax = dx[0];
                    for (int i = 0; i < length; i++) {
                        result[i * resultEleStride] = 0.0;
                        if (currMax < dx[i * eleStride]) {
                            currMax = dx[i * eleStride];
                            maxIdx = i;
                        }
                    }
                    result[maxIdx * resultEleStride] = 1.0;
                }
                else {
                    int maxIdx = 0;
                    T currMax = dx[0];
#pragma omp parallel proc_bind(AFFINITY) default(shared)
                    {
                        int maxIdxLocal = maxIdx;
                        T currMaxLocal = currMax;
                        for (int i = 0; i < length; i++) {
                            result[i * resultEleStride] = 0.0;
                            if (currMaxLocal < dx[i * eleStride]) {
                                currMaxLocal = dx[i * eleStride];
                                maxIdxLocal = i;
                            }
                        }
#pragma omp critical
                        {
                            if (currMax < currMaxLocal) {
                                currMax = currMaxLocal;
                                maxIdx = maxIdxLocal;
                            }
                        }
                    }
                    result[maxIdx * resultEleStride] = 1.0;
                }
            }
        }
        else {
            // mismatched orders: generic raw coordinate iteration
            Nd4jLong shapeIter[MAX_RANK];
            Nd4jLong coord[MAX_RANK];
            int dim;
            Nd4jLong xStridesIter[MAX_RANK];
            Nd4jLong resultStridesIter[MAX_RANK];
            auto xShape = shape::shapeOf(xShapeBuffer);
            auto xStride = shape::stride(xShapeBuffer);
            auto resultStride = shape::stride(resultShapeBuffer);
            auto rank = shape::rank(xShapeBuffer);
            T *originalResult = result;
            if (PrepareTwoRawArrayIter<T>(rank,
                                          xShape,
                                          dx,
                                          xStride,
                                          result,
                                          resultStride,
                                          &rank,
                                          shapeIter,
                                          &dx,
                                          xStridesIter,
                                          &result,
                                          resultStridesIter) >= 0) {
                T value = dx[0];
                int idx = 0;
                int maxIdx = 0;
                ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
                    if (dx[0] > value) {
                        value = dx[0];
                        maxIdx = idx;
                    }
                    idx++;
                    result[0] = 0.0;
                }
                ND4J_RAW_ITER_TWO_NEXT(
                        dim,
                        rank,
                        coord,
                        shapeIter,
                        dx,
                        xStridesIter,
                        result,
                        resultStridesIter);

                //pointer to where max value would be
                // NOTE(review): the 'f'-order offset arithmetic below (last-dim
                // stride only) looks suspicious -- confirm against callers.
                if (shape::order(resultShapeBuffer) == 'c' || (shape::order(resultShapeBuffer) == 'f' &&
                                                               maxIdx * shape::stride(resultShapeBuffer)[shape::rank(resultShapeBuffer) - 1] >=
                                                               shape::length(resultShapeBuffer)))
                    originalResult[maxIdx] = 1.0;
                else
                    originalResult[maxIdx * shape::stride(resultShapeBuffer)[shape::rank(resultShapeBuffer) - 1]] = 1.0;
            }
        }
    }

public:
#ifdef __CUDACC__
    /**
     * Device-side dispatcher: only the whole-array case is handled
     * (extraParams absent or first dimension == MAX_DIMENSION).
     */
    static inline __device__ void execSpecialCuda(
            T *dx,
            Nd4jLong *xShapeBuffer,
            T *result,
            Nd4jLong *resultShapeBuffer,
            T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        // FIXME: MAX_DIMENSION is lower then FP16 frame
        if (extraParams == nullptr || (int) extraParams[0] == MAX_DIMENSION) {
            doAllCuda(dx, xShapeBuffer, result, resultShapeBuffer, extraParams, allocationPointer, reductionPointer, manager);
        }
    }
#endif

    /**
     * Host-side dispatcher.
     * extraParams encodes the reduction dimensions: extraParams[0] is the
     * dimension count, extraParams[1..] the dimensions themselves.
     */
    static void execSpecial(
            T *dx,
            Nd4jLong *xShapeBuffer,
            T *result,
            Nd4jLong *resultShapeBuffer,
            T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        //FIXME: this op should be moved to CustomOps
        if (extraParams == nullptr || (int)extraParams[0] == 0 ||
            ((int)extraParams[0] == 1 && (int)extraParams[1] == MAX_DIMENSION)) {
            // no dimensions requested: one-hot argmax over the whole buffer
            doAll(dx, xShapeBuffer, result, resultShapeBuffer, extraParams);
        }
        else if (shape::isVector(xShapeBuffer)) {
            auto dimensionLength = (int)extraParams[0];
            auto dimension = new int[dimensionLength];
            auto length = shape::length(xShapeBuffer);
            for (int i = 0; i < dimensionLength; i++) {
                dimension[i] = (int)extraParams[i + 1];
            }
            if (shape::shapeOf(xShapeBuffer)[dimension[0]] == 1) {
                // reducing along a unit dimension: every element is its own max
                for (int i = 0; i < length; i++) {
                    result[i] = 1.0;
                }
            }
            else {
                auto eleStride = shape::elementWiseStride(xShapeBuffer);
                if (eleStride == 1) {
                    int maxIdx = 0;
                    T currMax = dx[0];
                    if (length < ELEMENT_THRESHOLD) {
                        for (int i = 0; i < length; i++) {
                            if (currMax < dx[i]) {
                                currMax = dx[i];
                                maxIdx = i;
                            }
                            result[i] = 0.0;
                        }
                    }
                    else {
                        // redundant full scans per thread, merged in critical
#pragma omp parallel proc_bind(AFFINITY) default(shared)
                        {
                            int maxIdxLocal = maxIdx;
                            T currMaxLocal = currMax;
                            for (int i = 0; i < length; i++) {
                                if (currMaxLocal < dx[i]) {
                                    currMaxLocal = dx[i];
                                    maxIdxLocal = i;
                                }
                                result[i] = 0.0;
                            }
#pragma omp critical
                            {
                                if (currMax < currMaxLocal) {
                                    currMax = currMaxLocal;
                                    maxIdx = maxIdxLocal;
                                }
                            }
                        }
                    }
                    result[maxIdx] = 1.0;
                }
                else {
                    int maxIdx = 0;
                    T currMax = dx[0];
                    if (length < ELEMENT_THRESHOLD) {
                        for (int i = 0; i < length; i++) {
                            if (currMax < dx[i * eleStride]) {
                                currMax = dx[i * eleStride];
                                maxIdx = i;
                            }
                            result[i] = 0.0;
                        }
                    }
                    else {
#pragma omp parallel proc_bind(AFFINITY) default(shared)
                        {
                            int maxIdxLocal = maxIdx;
                            T currMaxLocal = currMax;
                            for (int i = 0; i < length; i++) {
                                if (currMaxLocal < dx[i * eleStride]) {
                                    currMaxLocal = dx[i * eleStride];
                                    maxIdxLocal = i;
                                }
                                result[i] = 0.0;
                            }
#pragma omp critical
                            {
                                if (currMax < currMaxLocal) {
                                    currMax = currMaxLocal;
                                    maxIdx = maxIdxLocal;
                                }
                            }
                        }
                    }
                    result[maxIdx] = 1.0;
                }
            }
            // BUGFIX: this allocation previously leaked on the vector branch
            delete[] dimension;
        }
        else {
            auto dimensionLength = (int) extraParams[0];
            auto dimension = new int[dimensionLength];
#pragma omp simd
            for (int i = 0; i < dimensionLength; i++) {
                dimension[i] = (int) extraParams[i + 1];
            }
            //decompose in to several sub tads after
            //moving all dimensions (in sorted order)
            //to the back.
            //permuted version of the x shape info for setting up the tad problem
            auto tadShapeShapeInfo = tadShapeInfo;
            shape::TAD tad (xShapeBuffer, dimension, dimensionLength);

            if(tadShapeInfo==nullptr) {
                tad.createTadOnlyShapeInfo();
                tad.createOffsets();
                tadShapeShapeInfo = tad.tadOnlyShapeInfo;
                tadOffsets = tad.tadOffsets;
            }

            auto tadLength = shape::tadLength(xShapeBuffer, dimension, dimensionLength);
            auto tads = shape::length(xShapeBuffer) / tadLength;

            int tadsPerThread = tads / TAD_THRESHOLD;
            int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
            num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

            auto tadEWS = shape::elementWiseStride(tadShapeShapeInfo);
            auto zEWS = tadEWS;

            // manual work distribution: each thread owns a contiguous span of tads
            int span = (tads / num_threads) + 8;

#pragma omp parallel num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY)
            {
                int tid = omp_get_thread_num();
                int start = span * tid;
                int end = span * (tid + 1);
                if (end > tads) end = tads;

                for (int r = start; r < end; r++) {
                    if (tadEWS > 0 && zEWS > 0 && dimensionLength == 1) {
                        // fast path: strided 1-D argmax inside the tad
                        T *rX = dx + tadOffsets[r];
                        T *rZ = result + tadOffsets[r];

                        T maxValue = rX[0];
                        int maxIdx = 0;
                        if (tadEWS == 1 && zEWS == 1) {
                            for (int i = 0; i < tadLength; i++) {
                                if (rX[i] > maxValue) {
                                    maxIdx = i;
                                    maxValue = rX[i];
                                }
                            }

#pragma omp simd
                            for (int i = 0; i < tadLength; i++) {
                                rZ[i] = maxIdx == i ? (T) 1.0 : (T) 0.0;
                            }
                        } else {
                            for (int i = 0; i < tadLength; i++) {
                                if (rX[i * tadEWS] > maxValue) {
                                    maxIdx = i;
                                    maxValue = rX[i * tadEWS];
                                }
                            }

#pragma omp simd
                            for (int i = 0; i < tadLength; i++) {
                                rZ[i * zEWS] = maxIdx == i ? (T) 1.0 : (T) 0.0;
                            }
                        }
                    } else {
                        // generic path: raw coordinate iteration over the tad
                        int tadsPerThread = tads / TAD_THRESHOLD;
                        int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
                        num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

                        auto offset = tadOffsets[r];
                        Nd4jLong shapeIter[MAX_RANK];
                        Nd4jLong coord[MAX_RANK];
                        int dim;
                        Nd4jLong xStridesIter[MAX_RANK];
                        Nd4jLong resultStridesIter[MAX_RANK];
                        auto xShape = shape::shapeOf(tadShapeShapeInfo);
                        auto xStride = shape::stride(tadShapeShapeInfo);
                        auto resultStride = shape::stride(tadShapeShapeInfo);
                        int rank = shape::rank(tadShapeShapeInfo);
                        T *xPointer = dx + offset;
                        T *resultPointer = result + offset;
                        T maxValue = xPointer[0];

                        T *maxCursor = resultPointer;
                        Nd4jPointer maxCursorLong = reinterpret_cast<Nd4jPointer>(maxCursor);
                        if (PrepareTwoRawArrayIter<T>(rank,
                                                      xShape,
                                                      xPointer,
                                                      xStride,
                                                      resultPointer,
                                                      resultStride,
                                                      &rank,
                                                      shapeIter,
                                                      &xPointer,
                                                      xStridesIter,
                                                      &resultPointer,
                                                      resultStridesIter) >= 0) {
                            ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
                                if (maxValue < xPointer[0]) {
                                    maxCursor = resultPointer;
                                    maxCursorLong = reinterpret_cast<Nd4jPointer>(resultPointer);
                                    maxValue = xPointer[0];
                                }
                                resultPointer[0] = 0.0;
                            }
                            ND4J_RAW_ITER_TWO_NEXT(dim,
                                                   rank,
                                                   coord,
                                                   shapeIter,
                                                   xPointer,
                                                   xStridesIter,
                                                   resultPointer,
                                                   resultStridesIter);
                            maxCursor = reinterpret_cast<T *>(maxCursorLong);
                            maxCursor[0] = 1.0;
                        }
                    }
                }
            }
            delete[] dimension;
        }
    }

    // NOTE(review): copy-pasted from SoftPlus and unreachable in practice
    // (requiresSpecial == true routes execution through execSpecial);
    // kept unchanged for interface compatibility.
    op_def static T op(T d1, T *params) {
        return nd4j::math::softplus<T>(d1);
    }
};
}
|
GB_unaryop__minv_bool_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_int32
// op(A') function: GB_tran__minv_bool_int32
// C type: bool
// A type: int32_t
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the MINV operator with boolean output to each of the anz entries
// of Ax, writing into Cx.  For boolean MINV the result is the constant true
// (see GB_OP above: z = true), so Ax is never actually read (GB_GETA is
// empty).  Auto-generated code; the loop body expands via GB_CAST_OP.
GrB_Info GB_unop__minv_bool_int32
(
bool *restrict Cx,              // output array, anz entries
const int32_t *restrict Ax,     // input array (unused here: output is constant)
int64_t anz,                    // number of entries to process
int nthreads                    // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// each entry is independent, so a static schedule over anz entries suffices
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;             // Cx [p] = true (via GB_CASTING + GB_OP)
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int32 -> bool, and apply the
// boolean MINV operator (constant true).  The actual loop lives in the
// shared template GB_unaryop_transpose.c, driven by the GB_* macros defined
// above in this file; GB_PHASE_2_OF_2 selects the numeric (second) phase.
GrB_Info GB_tran__minv_bool_int32
(
GrB_Matrix C,                       // output matrix
const GrB_Matrix A,                 // input matrix, consumed transposed
int64_t *restrict *Rowcounts,       // workspace consumed by GB_unaryop_transpose.c
GBI_single_iterator Iter,           // iterator state for walking A
const int64_t *restrict A_slice,    // partition of A across the naslice tasks
int naslice                         // number of parallel slices
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__log2_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log2_fc32_fc32)
// op(A') function: GB (_unop_tran__log2_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_clog2f (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog2f (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_clog2f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies z = GB_clog2f (aij) -- single-precision complex base-2 logarithm --
// to each entry of Ax, writing into Cx.  Auto-generated code.
GrB_Info GB (_unop_apply__log2_fc32_fc32)
(
GxB_FC32_t *Cx,             // Cx and Ax may be aliased
const GxB_FC32_t *Ax,       // input values
const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
int64_t anz,                // number of entries (or bitmap size)
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// non-bitmap case: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_clog2f (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;     // skip positions not present in the bitmap
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_clog2f (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the complex base-2 log
// operator.  The loop body lives in the shared template GB_unop_transpose.c,
// driven by the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__log2_fc32_fc32)
(
GrB_Matrix C,                       // output matrix
const GrB_Matrix A,                 // input matrix, consumed transposed
int64_t *restrict *Workspaces,      // workspace consumed by GB_unop_transpose.c
const int64_t *restrict A_slice,    // partition of A across the parallel tasks
int nworkspaces,                    // number of workspaces
int nthreads                        // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` operands.
 *
 * Note that *y is normalized in place (the classic glibc-manual idiom), so
 * the caller's y is modified.  Returns 1 if the difference is negative,
 * otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that x->tv_usec - y->tv_usec lands in (-1000000, 1000000). */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += carry;
    y->tv_usec -= 1000000 * carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int borrow = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= borrow;
    y->tv_usec += 1000000 * borrow;
  }

  /* With y normalized, tv_usec of the difference is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative iff x precedes the normalized y in whole seconds. */
  return x->tv_sec < y->tv_sec;
}
/* Driver for the tiled order-1 3D 7-point stencil benchmark.
 * Usage: ./a.out Nx Ny Nz Nt  (interior extents plus time steps). */
int main(int argc, char *argv[])
{
	int i, j, k, test;
	int Nx, Ny, Nz, Nt;

	/* BUGFIX: Nx..Nt were previously read uninitialized when fewer than
	 * four arguments were supplied (undefined behavior). All four are
	 * required, so fail fast with a usage message instead. */
	if (argc < 5) {
		fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
		return 1;
	}
	Nx = atoi(argv[1])+2;   /* +2: one halo cell on each side */
	Ny = atoi(argv[2])+2;
	Nz = atoi(argv[3])+2;
	Nt = atoi(argv[4]);

	/* two time planes of an Nz x Ny x Nx grid */
	double ****A = (double ****) malloc(sizeof(double***)*2);
	A[0] = (double ***) malloc(sizeof(double**)*Nz);
	A[1] = (double ***) malloc(sizeof(double**)*Nz);
	for(i=0; i<Nz; i++){
		A[0][i] = (double**) malloc(sizeof(double*)*Ny);
		A[1][i] = (double**) malloc(sizeof(double*)*Ny);
		for(j=0;j<Ny;j++){
			A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
			A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
		}
	}

	// tile size information, including extra element to decide the list length
	int *tile_size = (int*) malloc(sizeof(int));
	tile_size[0] = -1;
	// The list is modified here before source-to-source transformations
	tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
	tile_size[0] = 4;
	tile_size[1] = 4;
	tile_size[2] = 4;
	tile_size[3] = 512;
	tile_size[4] = -1;

	// for timekeeping
	int ts_return = -1;
	struct timeval start, end, result;
	double tdiff = 0.0, min_tdiff=1.e100;

	const int BASE = 1024;
	const double alpha = 0.0876;
	const double beta = 0.0765;

	// initialize variables
	srand(42);
	/* BUGFIX: initialize the full grid (including the i/j/k == 0 boundary
	 * planes) and both time planes; the stencil reads boundary cells of
	 * each plane, which were previously left uninitialized. */
	for (i = 0; i < Nz; i++) {
		for (j = 0; j < Ny; j++) {
			for (k = 0; k < Nx; k++) {
				A[0][i][j][k] = 1.0 * (rand() % BASE);
				A[1][i][j][k] = 0.0;
			}
		}
	}

#ifdef LIKWID_PERFMON
	LIKWID_MARKER_INIT;
#pragma omp parallel
	{
		LIKWID_MARKER_THREADINIT;
#pragma omp barrier
		LIKWID_MARKER_START("calc");
	}
#endif

	int num_threads = 1;
#if defined(_OPENMP)
	num_threads = omp_get_max_threads();
#endif

	for(test=0; test<TESTS; test++){
		gettimeofday(&start, 0);

		// serial execution - Addition: 6 && Multiplication: 2
		/* (glibc stdc-predef.h boilerplate comment elided) */
		int t1, t2, t3, t4, t5, t6, t7, t8;
		int lb, ub, lbp, ubp, lb2, ub2;
		register int lbv, ubv;

		/* Start of CLooG code */
		if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
			for (t1=-1;t1<=floord(Nt-2,2);t1++) {
				lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
				ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
				for (t2=lbp;t2<=ubp;t2++) {
					for (t3=max(max(0,ceild(t1-1,2)),ceild(4*t2-Nz,4));t3<=min(min(min(floord(4*t2+Ny,4),floord(Nt+Ny-4,4)),floord(2*t1+Ny+1,4)),floord(4*t1-4*t2+Nz+Ny-1,4));t3++) {
						for (t4=max(max(max(0,ceild(t1-255,256)),ceild(4*t2-Nz-508,512)),ceild(4*t3-Ny-508,512));t4<=min(min(min(min(floord(4*t2+Nx,512),floord(4*t3+Nx,512)),floord(Nt+Nx-4,512)),floord(2*t1+Nx+1,512)),floord(4*t1-4*t2+Nz+Nx-1,512));t4++) {
							for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),4*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),4*t3+2),512*t4+510),4*t1-4*t2+Nz+1);t5++) {
								for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
									for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
										lbv=max(512*t4,t5+1);
										ubv=min(512*t4+511,t5+Nx-2);
#pragma ivdep
#pragma vector always
										for (t8=lbv;t8<=ubv;t8++) {
											A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
										}
									}
								}
							}
						}
					}
				}
			}
		}
		/* End of CLooG code */

		gettimeofday(&end, 0);
		ts_return = timeval_subtract(&result, &end, &start);
		(void) ts_return;   /* sign of the interval is not needed here */
		tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
		min_tdiff = min(min_tdiff, tdiff);
		printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
	}

	PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
	{
		LIKWID_MARKER_STOP("calc");
	}
	LIKWID_MARKER_CLOSE;
#endif

	// Free allocated arrays (Causing performance degradation
	/* for(i=0; i<Nz; i++){
		for(j=0;j<Ny;j++){
			free(A[0][i][j]);
			free(A[1][i][j]);
		}
		free(A[0][i]);
		free(A[1][i]);
	}
	free(A[0]);
	free(A[1]);
	*/
	return 0;
}
|
f59950_gcc_so8.c | #define _POSIX_C_SOURCE 200809L
#define START_TIMER(S) \
struct timeval start_##S, end_##S; \
gettimeofday(&start_##S, NULL);
#define STOP_TIMER(S, T) \
gettimeofday(&end_##S, NULL); \
T->S += (double)(end_##S.tv_sec - start_##S.tv_sec) + (double)(end_##S.tv_usec - start_##S.tv_usec) / 1000000;
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
/* Array descriptor handed to Kernel by the generating runtime. */
struct dataobj
{
  void *restrict data; /* base pointer; Kernel casts it to a typed multi-dim
                          array using the extents in size[] */
  int *size;           /* extent of each dimension, used for those casts */
  int *npsize;         /* NOTE(review): the remaining descriptors are not
                          referenced in the visible code -- presumably
                          padded/domain/halo sizes and offsets; confirm
                          against the code generator. */
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};
/* Per-section wall-clock accumulator filled via START_TIMER/STOP_TIMER. */
struct profiler
{
  double section0; /* accumulated elapsed seconds for section0 (see STOP_TIMER) */
};
int Kernel(struct dataobj *restrict block_sizes_vec, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_fxx_vec, struct dataobj *restrict save_src_fyy_vec, struct dataobj *restrict save_src_fzz_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict tau_sol_xx_vec, struct dataobj *restrict tau_sol_xy_vec, struct dataobj *restrict tau_sol_xz_vec, struct dataobj *restrict tau_sol_yy_vec, struct dataobj *restrict tau_sol_yz_vec, struct dataobj *restrict tau_sol_zz_vec, struct dataobj *restrict v_sol_x_vec, struct dataobj *restrict v_sol_y_vec, struct dataobj *restrict v_sol_z_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers)
{
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_fxx)[save_src_fxx_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fxx_vec->size[1]])save_src_fxx_vec->data;
float(*restrict save_src_fyy)[save_src_fyy_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fyy_vec->size[1]])save_src_fyy_vec->data;
float(*restrict save_src_fzz)[save_src_fzz_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fzz_vec->size[1]])save_src_fzz_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict tau_sol_xx)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]])tau_sol_xx_vec->data;
float(*restrict tau_sol_xy)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]])tau_sol_xy_vec->data;
float(*restrict tau_sol_xz)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]])tau_sol_xz_vec->data;
float(*restrict tau_sol_yy)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]])tau_sol_yy_vec->data;
float(*restrict tau_sol_yz)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]])tau_sol_yz_vec->data;
float(*restrict tau_sol_zz)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]])tau_sol_zz_vec->data;
float(*restrict v_sol_x)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]])v_sol_x_vec->data;
float(*restrict v_sol_y)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]])v_sol_y_vec->data;
float(*restrict v_sol_z)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]])v_sol_z_vec->data;
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
int xb_size = block_sizes[0];
int yb_size = block_sizes[1];
int x0_blk0_size = block_sizes[2];
int y0_blk0_size = block_sizes[3];
int sf = 8;
int t_blk_size = 2 * sf * (time_M - time_m);
/* int xb_size = 64; int yb_size = 64; x0_blk0_size = 8; y0_blk0_size = 8; */
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
/* Begin section0 */
START_TIMER(section0)
for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
{
for (int time = t_blk, t0 = (time) % (2), t1 = (time + 1) % (2); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (2), t1 = (((time / sf) % (time_M - time_m + 1))) % (2))
{
int tw = ((time / sf) % (time_M - time_m + 1));
#pragma omp parallel num_threads(nthreads)
{
//printf(" Change of time block : %d \n", tw);
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
//printf(" Change of inner xblock %d \n", x0_blk0);
for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
//printf(" Updating velocity x %d \n", x - time + 4);
//printf(" \n PDE update : \n");
#pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 64)
for (int z = z_m; z <= z_M; z += 1)
{
//printf(" Updating velocity x %d z: %d \n", x - time + 4, z + 4);
float r26 = 1.0 / h_z;
float r25 = 1.0 / h_y;
float r24 = 1.0 / h_x;
v_sol_x[t1][x - time + 8][y - time + 8][z + 8] = r24 * (4.56702358488521e-4F * (tau_sol_xx[t0][x - time + 5][y - time + 8][z + 8] - tau_sol_xx[t0][x - time + 12][y - time + 8][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xx[t0][x - time + 6][y - time + 8][z + 8] + tau_sol_xx[t0][x - time + 11][y - time + 8][z + 8]) + 5.22163029879319e-2F * (tau_sol_xx[t0][x - time + 7][y - time + 8][z + 8] - tau_sol_xx[t0][x - time + 10][y - time + 8][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xx[t0][x - time + 8][y - time + 8][z + 8] + tau_sol_xx[t0][x - time + 9][y - time + 8][z + 8])) + r25 * (4.56702358488521e-4F * (tau_sol_xy[t0][x - time + 8][y - time + 4][z + 8] - tau_sol_xy[t0][x - time + 8][y - time + 11][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xy[t0][x - time + 8][y - time + 5][z + 8] + tau_sol_xy[t0][x - time + 8][y - time + 10][z + 8]) + 5.22163029879319e-2F * (tau_sol_xy[t0][x - time + 8][y - time + 6][z + 8] - tau_sol_xy[t0][x - time + 8][y - time + 9][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xy[t0][x - time + 8][y - time + 7][z + 8] + tau_sol_xy[t0][x - time + 8][y - time + 8][z + 8])) + r26 * (4.56702358488521e-4F * (tau_sol_xz[t0][x - time + 8][y - time + 8][z + 4] - tau_sol_xz[t0][x - time + 8][y - time + 8][z + 11]) + 6.2659563586471e-3F * (-tau_sol_xz[t0][x - time + 8][y - time + 8][z + 5] + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 10]) + 5.22163029879319e-2F * (tau_sol_xz[t0][x - time + 8][y - time + 8][z + 6] - tau_sol_xz[t0][x - time + 8][y - time + 8][z + 9]) + 7.8324454477134e-1F * (-tau_sol_xz[t0][x - time + 8][y - time + 8][z + 7] + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 8])) + v_sol_x[t0][x - time + 8][y - time + 8][z + 8];
v_sol_y[t1][x - time + 8][y - time + 8][z + 8] = r24 * (4.56702358488521e-4F * (tau_sol_xy[t0][x - time + 4][y - time + 8][z + 8] - tau_sol_xy[t0][x - time + 11][y - time + 8][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xy[t0][x - time + 5][y - time + 8][z + 8] + tau_sol_xy[t0][x - time + 10][y - time + 8][z + 8]) + 5.22163029879319e-2F * (tau_sol_xy[t0][x - time + 6][y - time + 8][z + 8] - tau_sol_xy[t0][x - time + 9][y - time + 8][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xy[t0][x - time + 7][y - time + 8][z + 8] + tau_sol_xy[t0][x - time + 8][y - time + 8][z + 8])) + r25 * (4.56702358488521e-4F * (tau_sol_yy[t0][x - time + 8][y - time + 5][z + 8] - tau_sol_yy[t0][x - time + 8][y - time + 12][z + 8]) + 6.2659563586471e-3F * (-tau_sol_yy[t0][x - time + 8][y - time + 6][z + 8] + tau_sol_yy[t0][x - time + 8][y - time + 11][z + 8]) + 5.22163029879319e-2F * (tau_sol_yy[t0][x - time + 8][y - time + 7][z + 8] - tau_sol_yy[t0][x - time + 8][y - time + 10][z + 8]) + 7.8324454477134e-1F * (-tau_sol_yy[t0][x - time + 8][y - time + 8][z + 8] + tau_sol_yy[t0][x - time + 8][y - time + 9][z + 8])) + r26 * (4.56702358488521e-4F * (tau_sol_yz[t0][x - time + 8][y - time + 8][z + 4] - tau_sol_yz[t0][x - time + 8][y - time + 8][z + 11]) + 6.2659563586471e-3F * (-tau_sol_yz[t0][x - time + 8][y - time + 8][z + 5] + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 10]) + 5.22163029879319e-2F * (tau_sol_yz[t0][x - time + 8][y - time + 8][z + 6] - tau_sol_yz[t0][x - time + 8][y - time + 8][z + 9]) + 7.8324454477134e-1F * (-tau_sol_yz[t0][x - time + 8][y - time + 8][z + 7] + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 8])) + v_sol_y[t0][x - time + 8][y - time + 8][z + 8];
v_sol_z[t1][x - time + 8][y - time + 8][z + 8] = r24 * (4.56702358488521e-4F * (tau_sol_xz[t0][x - time + 4][y - time + 8][z + 8] - tau_sol_xz[t0][x - time + 11][y - time + 8][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xz[t0][x - time + 5][y - time + 8][z + 8] + tau_sol_xz[t0][x - time + 10][y - time + 8][z + 8]) + 5.22163029879319e-2F * (tau_sol_xz[t0][x - time + 6][y - time + 8][z + 8] - tau_sol_xz[t0][x - time + 9][y - time + 8][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xz[t0][x - time + 7][y - time + 8][z + 8] + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 8])) + r25 * (4.56702358488521e-4F * (tau_sol_yz[t0][x - time + 8][y - time + 4][z + 8] - tau_sol_yz[t0][x - time + 8][y - time + 11][z + 8]) + 6.2659563586471e-3F * (-tau_sol_yz[t0][x - time + 8][y - time + 5][z + 8] + tau_sol_yz[t0][x - time + 8][y - time + 10][z + 8]) + 5.22163029879319e-2F * (tau_sol_yz[t0][x - time + 8][y - time + 6][z + 8] - tau_sol_yz[t0][x - time + 8][y - time + 9][z + 8]) + 7.8324454477134e-1F * (-tau_sol_yz[t0][x - time + 8][y - time + 7][z + 8] + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 8])) + r26 * (4.56702358488521e-4F * (tau_sol_zz[t0][x - time + 8][y - time + 8][z + 5] - tau_sol_zz[t0][x - time + 8][y - time + 8][z + 12]) + 6.2659563586471e-3F * (-tau_sol_zz[t0][x - time + 8][y - time + 8][z + 6] + tau_sol_zz[t0][x - time + 8][y - time + 8][z + 11]) + 5.22163029879319e-2F * (tau_sol_zz[t0][x - time + 8][y - time + 8][z + 7] - tau_sol_zz[t0][x - time + 8][y - time + 8][z + 10]) + 7.8324454477134e-1F * (-tau_sol_zz[t0][x - time + 8][y - time + 8][z + 8] + tau_sol_zz[t0][x - time + 8][y - time + 8][z + 9])) + v_sol_z[t0][x - time + 8][y - time + 8][z + 8];
}
}
}
}
}
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb - 2); x0_blk0 <= +min((x_M + time), (xb - 2 + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb - 2); y0_blk0 <= +min((y_M + time), (yb - 2 + yb_size)); y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0; x <= min(min((x_M + time), (xb - 2 + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
for (int y = y0_blk0; y <= min(min((y_M + time), (yb - 2 + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
//printf(" Updating stress x %d \n", x - time + 4);
#pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 64)
for (int z = z_m; z <= z_M; z += 1)
{
//printf(" Updating x %d z: %d \n", x - time + 4, z + 4);
float r47 = -v_sol_z[t1][x - time + 8][y - time + 8][z + 8];
float r46 = -v_sol_y[t1][x - time + 8][y - time + 8][z + 8];
float r45 = -v_sol_x[t1][x - time + 8][y - time + 8][z + 8];
float r44 = -v_sol_y[t1][x - time + 8][y - time + 7][z + 8] + v_sol_y[t1][x - time + 8][y - time + 8][z + 8];
float r43 = v_sol_y[t1][x - time + 8][y - time + 6][z + 8] - v_sol_y[t1][x - time + 8][y - time + 9][z + 8];
float r42 = -v_sol_y[t1][x - time + 8][y - time + 5][z + 8] + v_sol_y[t1][x - time + 8][y - time + 10][z + 8];
float r41 = v_sol_y[t1][x - time + 8][y - time + 4][z + 8] - v_sol_y[t1][x - time + 8][y - time + 11][z + 8];
float r40 = -v_sol_z[t1][x - time + 8][y - time + 8][z + 7] + v_sol_z[t1][x - time + 8][y - time + 8][z + 8];
float r39 = v_sol_z[t1][x - time + 8][y - time + 8][z + 6] - v_sol_z[t1][x - time + 8][y - time + 8][z + 9];
float r38 = -v_sol_z[t1][x - time + 8][y - time + 8][z + 5] + v_sol_z[t1][x - time + 8][y - time + 8][z + 10];
float r37 = v_sol_z[t1][x - time + 8][y - time + 8][z + 4] - v_sol_z[t1][x - time + 8][y - time + 8][z + 11];
float r36 = -v_sol_x[t1][x - time + 7][y - time + 8][z + 8] + v_sol_x[t1][x - time + 8][y - time + 8][z + 8];
float r35 = v_sol_x[t1][x - time + 6][y - time + 8][z + 8] - v_sol_x[t1][x - time + 9][y - time + 8][z + 8];
float r34 = -v_sol_x[t1][x - time + 5][y - time + 8][z + 8] + v_sol_x[t1][x - time + 10][y - time + 8][z + 8];
float r33 = v_sol_x[t1][x - time + 4][y - time + 8][z + 8] - v_sol_x[t1][x - time + 11][y - time + 8][z + 8];
float r32 = 1.0 / h_y;
float r31 = 1.0 / h_z;
float r30 = 1.0 / h_x;
float r29 = r30 * (2.95943128300561e-3F * r33 + 4.06033972040332e-2F * r34 + 3.38361643361799e-1F * r35 + 5.07542465011829F * r36);
float r28 = r31 * (2.95943128300561e-3F * r37 + 4.06033972040332e-2F * r38 + 3.38361643361799e-1F * r39 + 5.07542465011829F * r40);
float r27 = r32 * (2.95943128300561e-3F * r41 + 4.06033972040332e-2F * r42 + 3.38361643361799e-1F * r43 + 5.07542465011829F * r44);
tau_sol_xx[t1][x - time + 8][y - time + 8][z + 8] = r27 + r28 + r30 * (5.91886256601123e-3F * r33 + 8.12067944080664e-2F * r34 + 6.76723286723597e-1F * r35 + 1.01508493002366e+1F * r36) + tau_sol_xx[t0][x - time + 8][y - time + 8][z + 8];
tau_sol_xy[t1][x - time + 8][y - time + 8][z + 8] = r30 * (2.53771232505914F * (r46 + v_sol_y[t1][x - time + 9][y - time + 8][z + 8]) + 1.47971564150281e-3F * (v_sol_y[t1][x - time + 5][y - time + 8][z + 8] - v_sol_y[t1][x - time + 12][y - time + 8][z + 8]) + 2.03016986020166e-2F * (-v_sol_y[t1][x - time + 6][y - time + 8][z + 8] + v_sol_y[t1][x - time + 11][y - time + 8][z + 8]) + 1.69180821680899e-1F * (v_sol_y[t1][x - time + 7][y - time + 8][z + 8] - v_sol_y[t1][x - time + 10][y - time + 8][z + 8])) + r32 * (2.53771232505914F * (r45 + v_sol_x[t1][x - time + 8][y - time + 9][z + 8]) + 1.47971564150281e-3F * (v_sol_x[t1][x - time + 8][y - time + 5][z + 8] - v_sol_x[t1][x - time + 8][y - time + 12][z + 8]) + 2.03016986020166e-2F * (-v_sol_x[t1][x - time + 8][y - time + 6][z + 8] + v_sol_x[t1][x - time + 8][y - time + 11][z + 8]) + 1.69180821680899e-1F * (v_sol_x[t1][x - time + 8][y - time + 7][z + 8] - v_sol_x[t1][x - time + 8][y - time + 10][z + 8])) + tau_sol_xy[t0][x - time + 8][y - time + 8][z + 8];
tau_sol_xz[t1][x - time + 8][y - time + 8][z + 8] = r30 * (2.53771232505914F * (r47 + v_sol_z[t1][x - time + 9][y - time + 8][z + 8]) + 1.47971564150281e-3F * (v_sol_z[t1][x - time + 5][y - time + 8][z + 8] - v_sol_z[t1][x - time + 12][y - time + 8][z + 8]) + 2.03016986020166e-2F * (-v_sol_z[t1][x - time + 6][y - time + 8][z + 8] + v_sol_z[t1][x - time + 11][y - time + 8][z + 8]) + 1.69180821680899e-1F * (v_sol_z[t1][x - time + 7][y - time + 8][z + 8] - v_sol_z[t1][x - time + 10][y - time + 8][z + 8])) + r31 * (2.53771232505914F * (r45 + v_sol_x[t1][x - time + 8][y - time + 8][z + 9]) + 1.47971564150281e-3F * (v_sol_x[t1][x - time + 8][y - time + 8][z + 5] - v_sol_x[t1][x - time + 8][y - time + 8][z + 12]) + 2.03016986020166e-2F * (-v_sol_x[t1][x - time + 8][y - time + 8][z + 6] + v_sol_x[t1][x - time + 8][y - time + 8][z + 11]) + 1.69180821680899e-1F * (v_sol_x[t1][x - time + 8][y - time + 8][z + 7] - v_sol_x[t1][x - time + 8][y - time + 8][z + 10])) + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 8];
tau_sol_yy[t1][x - time + 8][y - time + 8][z + 8] = r28 + r29 + r32 * (5.91886256601123e-3F * r41 + 8.12067944080664e-2F * r42 + 6.76723286723597e-1F * r43 + 1.01508493002366e+1F * r44) + tau_sol_yy[t0][x - time + 8][y - time + 8][z + 8];
tau_sol_yz[t1][x - time + 8][y - time + 8][z + 8] = r31 * (2.53771232505914F * (r46 + v_sol_y[t1][x - time + 8][y - time + 8][z + 9]) + 1.47971564150281e-3F * (v_sol_y[t1][x - time + 8][y - time + 8][z + 5] - v_sol_y[t1][x - time + 8][y - time + 8][z + 12]) + 2.03016986020166e-2F * (-v_sol_y[t1][x - time + 8][y - time + 8][z + 6] + v_sol_y[t1][x - time + 8][y - time + 8][z + 11]) + 1.69180821680899e-1F * (v_sol_y[t1][x - time + 8][y - time + 8][z + 7] - v_sol_y[t1][x - time + 8][y - time + 8][z + 10])) + r32 * (2.53771232505914F * (r47 + v_sol_z[t1][x - time + 8][y - time + 9][z + 8]) + 1.47971564150281e-3F * (v_sol_z[t1][x - time + 8][y - time + 5][z + 8] - v_sol_z[t1][x - time + 8][y - time + 12][z + 8]) + 2.03016986020166e-2F * (-v_sol_z[t1][x - time + 8][y - time + 6][z + 8] + v_sol_z[t1][x - time + 8][y - time + 11][z + 8]) + 1.69180821680899e-1F * (v_sol_z[t1][x - time + 8][y - time + 7][z + 8] - v_sol_z[t1][x - time + 8][y - time + 10][z + 8])) + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 8];
tau_sol_zz[t1][x - time + 8][y - time + 8][z + 8] = r27 + r29 + r31 * (5.91886256601123e-3F * r37 + 8.12067944080664e-2F * r38 + 6.76723286723597e-1F * r39 + 1.01508493002366e+1F * r40) + tau_sol_zz[t0][x - time + 8][y - time + 8][z + 8];
}
for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
{
//printf("\n Source_injection at : ");
int zind = sp_source_mask[x - time][y - time][sp_zi];
float r0 = save_src_fxx[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
float r1 = save_src_fyy[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
float r2 = save_src_fzz[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
tau_sol_xx[t1][x - time + 8][y - time + 8][zind + 8] += r0;
tau_sol_yy[t1][x - time + 8][y - time + 8][zind + 8] += r1;
tau_sol_zz[t1][x - time + 8][y - time + 8][zind + 8] += r2;
//printf(" Time %d , at : %d, %d \n", tw, x - time + 4, zind + 4);
}
}
}
}
}
}
}
}
}
}
/* End section0 */
STOP_TIMER(section0, timers)
/* End section0 */
return 0;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * NOTE: normalises *y in place as a side effect (classic glibc idiom).
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow from the seconds field so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds into the seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After the normalisation above, tv_usec of the result is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference iff x's (normalised) seconds precede y's. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocates two ping-pong time planes plus a coefficient array,
 * fills them with pseudo-random data, runs the order-4 (25-point) wave
 * stencil TESTS times and reports per-run wall-clock time.
 *
 * Usage: ./3d25pt Nx Ny Nz [Nt]
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt had no defaults, so running with few arguments used
 *    uninitialized values (UB); sensible defaults are provided now.
 *  - roc2 was first malloc'd as a 1-element block and then the pointer was
 *    overwritten by a second malloc — a leak; allocate once.
 *  - The init loops started at index 1 and never wrote A[1], yet the stencil
 *    reads indices down to 0 and reads A[1] on the first timestep —
 *    uninitialized reads; initialize everything now.
 *  - tile_size and the top-level A pointer array were never freed.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Defaults; +8 accounts for a 4-point halo on each side of every axis. */
    int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1]) + 8;
        Ny = atoi(argv[2]) + 8;
        Nz = atoi(argv[3]) + 8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Allocate A[0]/A[1] (ping-pong time planes) and roc2 (coefficients). */
    double ****A = (double ****) malloc(sizeof(double ***) * 2);
    double ***roc2 = (double ***) malloc(sizeof(double **) * Nz);
    A[0] = (double ***) malloc(sizeof(double **) * Nz);
    A[1] = (double ***) malloc(sizeof(double **) * Nz);
    for (i = 0; i < Nz; i++) {
        A[0][i] = (double **) malloc(sizeof(double *) * Ny);
        A[1][i] = (double **) malloc(sizeof(double *) * Ny);
        roc2[i] = (double **) malloc(sizeof(double *) * Ny);
        for (j = 0; j < Ny; j++) {
            A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
            A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
            roc2[i][j] = (double *) malloc(sizeof(double) * Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int *) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int *) realloc((void *)tile_size, sizeof(int) * 5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 8;
    tile_size[3] = 32;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;

    /* Initialize every cell, including plane/row/column 0 and both time
     * planes, so the stencil never reads uninitialized memory. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* Order-4 finite-difference coefficients (central differences). */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (innermost first)
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);          /* was leaked in the original */
    free(tile_size);  /* was leaked in the original */
    return 0;
}
|
rwpng.c | /*
** PNG read/write functions
**
** © 1998-2000 by Greg Roelofs.
** © 2009-2017 by Kornel Lesiński.
**
** See COPYRIGHT file for license.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "png.h" /* if this include fails, you need to install libpng (e.g. libpng-devel package) and run ./configure */
#include "rwpng.h"
#if USE_LCMS
#include "lcms2.h"
#endif
#ifndef Z_BEST_COMPRESSION
#define Z_BEST_COMPRESSION 9
#endif
#ifndef Z_BEST_SPEED
#define Z_BEST_SPEED 1
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#endif
#if PNG_LIBPNG_VER < 10400
#error libpng version 1.4 or later is required. 1.6 is recommended. You have an obsolete version of libpng or compiling on an outdated/unsupported operating system. Please upgrade.
#endif
#if PNG_LIBPNG_VER < 10500
typedef png_const_charp png_const_bytep;
#endif
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg);
pngquant_error rwpng_read_image32_cocoa(FILE *infile, uint32_t *width, uint32_t *height, size_t *file_size, rwpng_rgba **image_data);
/* Print the libpng version in use and which color-profile backend this
 * binary was compiled with; warn about known-problematic libpng releases. */
void rwpng_version_info(FILE *fp)
{
    const char *pngver = png_get_header_ver(NULL);

#if USE_COCOA
    fprintf(fp, " Color profiles are supported via Cocoa. Using libpng %s.\n", pngver);
#elif USE_LCMS
    fprintf(fp, " Color profiles are supported via Little CMS. Using libpng %s.\n", pngver);
#else
    fprintf(fp, " Compiled with no support for color profiles. Using libpng %s.\n", pngver);
#endif

#if PNG_LIBPNG_VER < 10600
    /* Lexicographic strcmp works for these checks because all compared
     * versions share the "1.x." dotted prefix. */
    if (strcmp(pngver, "1.3.") < 0) {
        fputs("\nWARNING: Your version of libpng is outdated and may produce corrupted files.\n"
              "Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
    } else if (strcmp(pngver, "1.6.") < 0) {
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
        fputs("\nWARNING: Your version of libpng is old and has buggy support for custom chunks.\n"
              "Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
#endif
    }
#endif
}
struct rwpng_read_data {
FILE *const fp;
png_size_t bytes_read;
};
#if !USE_COCOA
/* libpng read callback: pull `length` bytes from the FILE* stashed in the
 * io_ptr and keep a running byte count so the caller can report file size. */
static void user_read_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
    struct rwpng_read_data *read_data = (struct rwpng_read_data *)png_get_io_ptr(png_ptr);

    png_size_t read = fread(data, 1, length, read_data->fp);
    /* A zero-byte read means EOF or I/O failure; png_error() longjmps out. */
    if (!read) {
        png_error(png_ptr, "Read error");
    }
    read_data->bytes_read += read;
}
#endif
struct rwpng_write_state {
FILE *outfile;
png_size_t maximum_file_size;
png_size_t bytes_written;
pngquant_error retval;
};
/* libpng write callback: append to the output FILE* and count bytes.
 * Once a write has failed, retval is sticky and later calls become no-ops
 * (though bytes_written still accumulates the attempted lengths). */
static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
    struct rwpng_write_state *write_state = (struct rwpng_write_state *)png_get_io_ptr(png_ptr);

    if (SUCCESS != write_state->retval) {
        return;
    }

    if (!fwrite(data, length, 1, write_state->outfile)) {
        write_state->retval = CANT_WRITE_ERROR;
    }

    write_state->bytes_written += length;
}
/* libpng flush callback — a callback is required by png_set_write_fn,
 * but libpng never actually invokes it, so it is intentionally empty. */
static void user_flush_data(png_structp png_ptr)
{
    // libpng never calls this :(
}
/* Build an array of per-row pointers into the pixel buffer `base`.
 * When `rowbytes` is 0 the row stride is queried from libpng instead.
 * Returns NULL when the pointer array cannot be allocated. */
static png_bytepp rwpng_create_row_pointers(png_infop info_ptr, png_structp png_ptr, unsigned char *base, size_t height, png_size_t rowbytes)
{
    const png_size_t stride = rowbytes ? rowbytes : png_get_rowbytes(png_ptr, info_ptr);

    png_bytepp rows = malloc(height * sizeof(rows[0]));
    if (!rows) {
        return NULL;
    }

    unsigned char *cursor = base;
    for (size_t i = 0; i < height; i++, cursor += stride) {
        rows[i] = cursor;
    }
    return rows;
}
#if !USE_COCOA
/* libpng user-chunk callback: copy each unknown ancillary chunk and prepend
 * it to the rwpng_chunk list whose head pointer is the user chunk pointer.
 * Return 1 = "handled" (libpng won't store the chunk itself), 0 = let
 * libpng process the chunk normally. */
static int read_chunk_callback(png_structp png_ptr, png_unknown_chunkp in_chunk)
{
    /* Color-management chunks are consumed elsewhere — hand them back. */
    if (0 == memcmp("iCCP", in_chunk->name, 5) ||
        0 == memcmp("cHRM", in_chunk->name, 5) ||
        0 == memcmp("gAMA", in_chunk->name, 5)) {
        return 0; // not handled
    }

    if (in_chunk->location == 0 ) {
        return 1; // ignore chunks with invalid location
    }

    struct rwpng_chunk **head = (struct rwpng_chunk **)png_get_user_chunk_ptr(png_ptr);

    /* NOTE(review): the two malloc results below are not NULL-checked;
     * an OOM here would crash in memcpy — confirm whether that is acceptable. */
    struct rwpng_chunk *chunk = malloc(sizeof(struct rwpng_chunk));
    memcpy(chunk->name, in_chunk->name, 5); /* 4 chars + NUL terminator */
    chunk->size = in_chunk->size;
    chunk->location = in_chunk->location;
    chunk->data = in_chunk->size ? malloc(in_chunk->size) : NULL;
    if (in_chunk->size) {
        memcpy(chunk->data, in_chunk->data, in_chunk->size);
    }

    /* Prepend to the singly-linked list. */
    chunk->next = *head;
    *head = chunk;

    return 1; // marks as "handled", libpng won't store it
}
#endif
/*
retval:
0 = success
21 = bad sig
22 = bad IHDR
24 = insufficient memory
25 = libpng error (via longjmp())
26 = wrong PNG color type (no alpha channel)
*/
#if !USE_COCOA
/* Forward libpng warnings to stderr (verbose mode). */
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg) {
    fprintf(stderr, " libpng warning: %s\n", msg);
}
/* Discard libpng warnings (non-verbose mode). */
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg) {
}
/* Read a PNG from `infile` into an 8-bit RGBA buffer inside *mainprog_ptr.
 * Registers libpng transforms so palette/gray/16-bit inputs all arrive as
 * RGBA8, optionally preserves ancillary chunks (strip == 0), and — when
 * built with LCMS — converts embedded color profiles to sRGB in place.
 * Returns SUCCESS or one of the pngquant_error codes listed above. */
static pngquant_error rwpng_read_image24_libpng(FILE *infile, png24_image *mainprog_ptr, int strip, int verbose)
{
    png_structp png_ptr = NULL;
    png_infop info_ptr = NULL;
    png_size_t rowbytes;
    int color_type, bit_depth;

    png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr,
                                     rwpng_error_handler, verbose ? rwpng_warning_stderr_handler : rwpng_warning_silent_handler);
    if (!png_ptr) {
        return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
    }

    info_ptr = png_create_info_struct(png_ptr);
    if (!info_ptr) {
        png_destroy_read_struct(&png_ptr, NULL, NULL);
        return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
    }

    /* setjmp() must be called in every function that calls a non-trivial
     * libpng function; rwpng_error_handler longjmps back here on error. */
    if (setjmp(mainprog_ptr->jmpbuf)) {
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        return LIBPNG_FATAL_ERROR; /* fatal libpng error (via longjmp()) */
    }

#if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED)
    /* silence libpng's check of sRGB profiles it considers malformed */
    png_set_option(png_ptr, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON);
#endif

#if PNG_LIBPNG_VER >= 10500 && defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
    if (!strip) {
        /* copy standard chunks too */
        png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_IF_SAFE, (png_const_bytep)"pHYs\0iTXt\0tEXt\0zTXt", 4);
    }
#endif
    if (!strip) {
        /* stash copies of unknown chunks on mainprog_ptr->chunks */
        png_set_read_user_chunk_fn(png_ptr, &mainprog_ptr->chunks, read_chunk_callback);
    }

    struct rwpng_read_data read_data = {infile, 0};
    png_set_read_fn(png_ptr, &read_data, user_read_data);

    png_read_info(png_ptr, info_ptr); /* read all PNG info up to image data */

    /* alternatively, could make separate calls to png_get_image_width(),
     * etc., but want bit_depth and color_type for later [don't care about
     * compression_type and filter_type => NULLs] */
    png_get_IHDR(png_ptr, info_ptr, &mainprog_ptr->width, &mainprog_ptr->height,
                 &bit_depth, &color_type, NULL, NULL, NULL);

    /* expand palette images to RGB, low-bit-depth grayscale images to 8 bits,
     * transparency chunks to full alpha channel; strip 16-bit-per-sample
     * images to 8 bits per sample; and convert grayscale to RGB[A] */
    /* GRR TO DO: preserve all safe-to-copy ancillary PNG chunks */
    if (!(color_type & PNG_COLOR_MASK_ALPHA)) {
#ifdef PNG_READ_FILLER_SUPPORTED
        png_set_expand(png_ptr);
        /* append an opaque alpha channel after the color samples */
        png_set_filler(png_ptr, 65535L, PNG_FILLER_AFTER);
#else
        fprintf(stderr, "pngquant readpng: image is neither RGBA nor GA\n");
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        mainprog_ptr->retval = WRONG_INPUT_COLOR_TYPE;
        return mainprog_ptr->retval;
#endif
    }

    if (bit_depth == 16) {
        png_set_strip_16(png_ptr);
    }

    if (!(color_type & PNG_COLOR_MASK_COLOR)) {
        png_set_gray_to_rgb(png_ptr);
    }

    /* get source gamma for gamma correction, or use sRGB default */
    double gamma = 0.45455;
    if (png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) {
        mainprog_ptr->input_color = RWPNG_SRGB;
        mainprog_ptr->output_color = RWPNG_SRGB;
    } else {
        /* gAMA chunk, when present and sane, overrides the sRGB default */
        png_get_gAMA(png_ptr, info_ptr, &gamma);
        if (gamma > 0 && gamma <= 1.0) {
            mainprog_ptr->input_color = RWPNG_GAMA_ONLY;
            mainprog_ptr->output_color = RWPNG_GAMA_ONLY;
        } else {
            fprintf(stderr, "pngquant readpng: ignored out-of-range gamma %f\n", gamma);
            mainprog_ptr->input_color = RWPNG_NONE;
            mainprog_ptr->output_color = RWPNG_NONE;
            gamma = 0.45455;
        }
    }
    mainprog_ptr->gamma = gamma;

    png_set_interlace_handling(png_ptr);

    /* all transformations have been registered; now update info_ptr data,
     * get rowbytes and channels, and allocate image memory */
    png_read_update_info(png_ptr, info_ptr);

    rowbytes = png_get_rowbytes(png_ptr, info_ptr);

    // For overflow safety reject images that won't fit in 32-bit
    if (rowbytes > INT_MAX/mainprog_ptr->height) {
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        return PNG_OUT_OF_MEMORY_ERROR;
    }

    if ((mainprog_ptr->rgba_data = malloc(rowbytes * mainprog_ptr->height)) == NULL) {
        fprintf(stderr, "pngquant readpng: unable to allocate image data\n");
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        return PNG_OUT_OF_MEMORY_ERROR;
    }

    /* NOTE(review): the returned row_pointers is not NULL-checked here —
     * png_read_image would dereference NULL on allocation failure; confirm. */
    png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);

    /* now we can go ahead and just read the whole image */
    png_read_image(png_ptr, row_pointers);

    /* and we're done! (png_read_end() can be omitted if no processing of
     * post-IDAT text/time/etc. is desired) */
    png_read_end(png_ptr, NULL);

#if USE_LCMS
#if PNG_LIBPNG_VER < 10500
    png_charp ProfileData;
#else
    png_bytep ProfileData;
#endif
    png_uint_32 ProfileLen;

    cmsHPROFILE hInProfile = NULL;

    /* color_type is read from the image before conversion to RGBA */
    int COLOR_PNG = color_type & PNG_COLOR_MASK_COLOR;

    /* embedded ICC profile */
    if (png_get_iCCP(png_ptr, info_ptr, &(png_charp){0}, &(int){0}, &ProfileData, &ProfileLen)) {

        hInProfile = cmsOpenProfileFromMem(ProfileData, ProfileLen);
        cmsColorSpaceSignature colorspace = cmsGetColorSpace(hInProfile);

        /* only RGB (and GRAY) valid for PNGs */
        if (colorspace == cmsSigRgbData && COLOR_PNG) {
            mainprog_ptr->input_color = RWPNG_ICCP;
            mainprog_ptr->output_color = RWPNG_SRGB;
        } else {
            if (colorspace == cmsSigGrayData && !COLOR_PNG) {
                mainprog_ptr->input_color = RWPNG_ICCP_WARN_GRAY;
                mainprog_ptr->output_color = RWPNG_SRGB;
            }
            cmsCloseProfile(hInProfile);
            hInProfile = NULL;
        }
    }

    /* build RGB profile from cHRM and gAMA */
    if (hInProfile == NULL && COLOR_PNG &&
        !png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB) &&
        png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) &&
        png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) {

        cmsCIExyY WhitePoint;
        cmsCIExyYTRIPLE Primaries;

        png_get_cHRM(png_ptr, info_ptr, &WhitePoint.x, &WhitePoint.y,
                     &Primaries.Red.x, &Primaries.Red.y,
                     &Primaries.Green.x, &Primaries.Green.y,
                     &Primaries.Blue.x, &Primaries.Blue.y);

        WhitePoint.Y = Primaries.Red.Y = Primaries.Green.Y = Primaries.Blue.Y = 1.0;

        cmsToneCurve *GammaTable[3];
        GammaTable[0] = GammaTable[1] = GammaTable[2] = cmsBuildGamma(NULL, 1/gamma);

        hInProfile = cmsCreateRGBProfile(&WhitePoint, &Primaries, GammaTable);

        cmsFreeToneCurve(GammaTable[0]);

        mainprog_ptr->input_color = RWPNG_GAMA_CHRM;
        mainprog_ptr->output_color = RWPNG_SRGB;
    }

    /* transform image to sRGB colorspace */
    if (hInProfile != NULL) {

        cmsHPROFILE hOutProfile = cmsCreate_sRGBProfile();
        /* cmsFLAGS_NOCACHE: the 1-entry cache is not thread-safe */
        cmsHTRANSFORM hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_8,
                                                      hOutProfile, TYPE_RGBA_8,
                                                      INTENT_PERCEPTUAL,
                                                      omp_get_max_threads() > 1 ? cmsFLAGS_NOCACHE : 0);
        if(!hTransform) {
            png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
            cmsCloseProfile(hOutProfile);
            cmsCloseProfile(hInProfile);
            return LCMS_FATAL_ERROR;
        }

#pragma omp parallel for \
        if (mainprog_ptr->height*mainprog_ptr->width > 8000) \
        schedule(static)
        for (unsigned int i = 0; i < mainprog_ptr->height; i++) {
            /* It is safe to use the same block for input and output,
               when both are of the same TYPE. */
            cmsDoTransform(hTransform, row_pointers[i],
                           row_pointers[i],
                           mainprog_ptr->width);
        }

        cmsDeleteTransform(hTransform);
        cmsCloseProfile(hOutProfile);
        cmsCloseProfile(hInProfile);

        /* pixels are sRGB now; record the standard sRGB-ish gamma */
        mainprog_ptr->gamma = 0.45455;
    }
#endif

    png_destroy_read_struct(&png_ptr, &info_ptr, NULL);

    mainprog_ptr->file_size = read_data.bytes_read;
    mainprog_ptr->row_pointers = (unsigned char **)row_pointers;

    return SUCCESS;
}
#endif
/* Free a linked list of copied PNG chunks (payload + node).
 * Iterative walk: the original recursed once per chunk, which could
 * exhaust the stack on a crafted file carrying very many chunks. */
static void rwpng_free_chunks(struct rwpng_chunk *chunk) {
    while (chunk) {
        struct rwpng_chunk *next = chunk->next;
        free(chunk->data);
        free(chunk);
        chunk = next;
    }
}
/* Release all heap storage owned by a png24_image and reset its pointers
 * so a double-free is harmless. */
void rwpng_free_image24(png24_image *image)
{
    free(image->rgba_data);
    image->rgba_data = NULL;

    free(image->row_pointers);
    image->row_pointers = NULL;

    rwpng_free_chunks(image->chunks);
    image->chunks = NULL;
}
/* Release all heap storage owned by a png8_image and reset its pointers
 * so a double-free is harmless. */
void rwpng_free_image8(png8_image *image)
{
    free(image->indexed_data);
    image->indexed_data = NULL;

    free(image->row_pointers);
    image->row_pointers = NULL;

    rwpng_free_chunks(image->chunks);
    image->chunks = NULL;
}
/* Read a PNG into an RGBA8 png24_image, dispatching to the Cocoa reader on
 * macOS builds and to the libpng implementation otherwise. */
pngquant_error rwpng_read_image24(FILE *infile, png24_image *out, int strip, int verbose)
{
#if USE_COCOA
    /* Cocoa reader yields one flat pixel buffer; build row pointers over it. */
    rwpng_rgba *pixel_data;
    pngquant_error res = rwpng_read_image32_cocoa(infile, &out->width, &out->height, &out->file_size, &pixel_data);
    if (res != SUCCESS) {
        return res;
    }
    /* Cocoa output is treated as sRGB with the standard gamma. */
    out->gamma = 0.45455;
    out->input_color = RWPNG_COCOA;
    out->output_color = RWPNG_SRGB;
    out->rgba_data = (unsigned char *)pixel_data;
    /* NOTE(review): this malloc is not NULL-checked — confirm upstream. */
    out->row_pointers = malloc(sizeof(out->row_pointers[0])*out->height);
    for(int i=0; i < out->height; i++) {
        out->row_pointers[i] = (unsigned char *)&pixel_data[out->width*i];
    }
    return SUCCESS;
#else
    return rwpng_read_image24_libpng(infile, out, strip, verbose);
#endif
}
/* Create the libpng write/info structs and configure zlib compression.
 * On success the structs are owned by the caller (via the out-params);
 * on any failure everything allocated so far is destroyed first. */
static pngquant_error rwpng_write_image_init(rwpng_png_image *mainprog_ptr, png_structpp png_ptr_p, png_infopp info_ptr_p, int fast_compression)
{
    /* could also replace libpng warning-handler (final NULL), but no need: */
    *png_ptr_p = png_create_write_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, NULL);

    if (!(*png_ptr_p)) {
        return LIBPNG_INIT_ERROR; /* out of memory */
    }

    *info_ptr_p = png_create_info_struct(*png_ptr_p);
    if (!(*info_ptr_p)) {
        png_destroy_write_struct(png_ptr_p, NULL);
        return LIBPNG_INIT_ERROR; /* out of memory */
    }

    /* setjmp() must be called in every function that calls a PNG-writing
     * libpng function, unless an alternate error handler was installed--
     * but compatible error handlers must either use longjmp() themselves
     * (as in this program) or exit immediately, so here we go: */
    if (setjmp(mainprog_ptr->jmpbuf)) {
        png_destroy_write_struct(png_ptr_p, info_ptr_p);
        return LIBPNG_INIT_ERROR; /* libpng error (via longjmp()) */
    }

    png_set_compression_level(*png_ptr_p, fast_compression ? Z_BEST_SPEED : Z_BEST_COMPRESSION);
    png_set_compression_mem_level(*png_ptr_p, fast_compression ? 9 : 5); // judging by optipng results, smaller mem makes libpng compress slightly better

    return SUCCESS;
}
/* Emit the header, pixel data and trailer, then dispose of the libpng
 * writer state. Both struct pointers are invalid after this returns. */
static void rwpng_write_end(png_infopp info_ptr_p, png_structpp png_ptr_p, png_bytepp row_pointers)
{
    png_structp const png = *png_ptr_p;
    png_infop const info = *info_ptr_p;

    png_write_info(png, info);          /* IHDR + ancillary chunks */
    png_set_packing(png);               /* pack sub-byte sample depths */
    png_write_image(png, row_pointers); /* the actual pixel rows */
    png_write_end(png, NULL);           /* IEND, no extra info struct */

    png_destroy_write_struct(png_ptr_p, info_ptr_p);
}
/* Record gamma/color-space metadata on the output according to the
 * requested color transform. */
static void rwpng_set_gamma(png_infop info_ptr, png_structp png_ptr, double gamma, rwpng_color_transform color)
{
    switch (color) {
        case RWPNG_GAMA_ONLY:
        case RWPNG_NONE:
            break; /* these transforms get no gAMA chunk */
        default:
            png_set_gAMA(png_ptr, info_ptr, gamma);
            break;
    }
    if (RWPNG_SRGB == color) {
        png_set_sRGB(png_ptr, info_ptr, 0); // 0 = Perceptual
    }
}
/* Write an 8-bit paletted PNG described by mainprog_ptr to outfile.
 * Enforces maximum_file_size (when non-zero), records metadata_size, and
 * returns SUCCESS, INVALID_ARGUMENT, LIBPNG_INIT_ERROR or TOO_LARGE_FILE. */
pngquant_error rwpng_write_image8(FILE *outfile, png8_image *mainprog_ptr)
{
    png_structp png_ptr;
    png_infop info_ptr;
    if (mainprog_ptr->num_palette > 256) return INVALID_ARGUMENT;
    pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, mainprog_ptr->fast_compression);
    if (retval) return retval;
    /* custom write callbacks count bytes_written so the size cap can be
     * checked after the write completes */
    struct rwpng_write_state write_state;
    write_state = (struct rwpng_write_state){
        .outfile = outfile,
        .maximum_file_size = mainprog_ptr->maximum_file_size,
        .retval = SUCCESS,
    };
    png_set_write_fn(png_ptr, &write_state, user_write_data, user_flush_data);
    // Palette images generally don't gain anything from filtering
    png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_VALUE_NONE);
    rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color);
    /* set the image parameters appropriately */
    /* smallest bit depth able to index the palette */
    int sample_depth;
#if PNG_LIBPNG_VER > 10400 /* old libpng corrupts files with low depth */
    if (mainprog_ptr->num_palette <= 2)
        sample_depth = 1;
    else if (mainprog_ptr->num_palette <= 4)
        sample_depth = 2;
    else if (mainprog_ptr->num_palette <= 16)
        sample_depth = 4;
    else
#endif
        sample_depth = 8;
    /* forward preserved ancillary chunks and tally their on-disk size;
     * 12 = 4-byte length + 4-byte type + 4-byte CRC per chunk */
    struct rwpng_chunk *chunk = mainprog_ptr->chunks;
    mainprog_ptr->metadata_size = 0;
    int chunk_num=0;
    while(chunk) {
        png_unknown_chunk pngchunk = {
            .size = chunk->size,
            .data = chunk->data,
            .location = chunk->location,
        };
        memcpy(pngchunk.name, chunk->name, 5);
        png_set_unknown_chunks(png_ptr, info_ptr, &pngchunk, 1);
#if defined(PNG_HAVE_IHDR) && PNG_LIBPNG_VER < 10600
        png_set_unknown_chunk_location(png_ptr, info_ptr, chunk_num, pngchunk.location ? pngchunk.location : PNG_HAVE_IHDR);
#endif
        mainprog_ptr->metadata_size += chunk->size + 12;
        chunk = chunk->next;
        chunk_num++;
    }
    png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
        sample_depth, PNG_COLOR_TYPE_PALETTE,
        0, PNG_COMPRESSION_TYPE_DEFAULT,
        PNG_FILTER_TYPE_BASE);
    /* build PLTE and tRNS; num_trans extends to just past the last
     * non-opaque entry, so any opaque entries before it are included too */
    png_color palette[256];
    png_byte trans[256];
    unsigned int num_trans = 0;
    for(unsigned int i = 0; i < mainprog_ptr->num_palette; i++) {
        palette[i] = (png_color){
            .red = mainprog_ptr->palette[i].r,
            .green = mainprog_ptr->palette[i].g,
            .blue = mainprog_ptr->palette[i].b,
        };
        trans[i] = mainprog_ptr->palette[i].a;
        if (mainprog_ptr->palette[i].a < 255) {
            num_trans = i+1;
        }
    }
    png_set_PLTE(png_ptr, info_ptr, palette, mainprog_ptr->num_palette);
    if (num_trans > 0) {
        png_set_tRNS(png_ptr, info_ptr, trans, num_trans, NULL);
    }
    rwpng_write_end(&info_ptr, &png_ptr, mainprog_ptr->row_pointers);
    /* enforce the optional output-size cap now that everything was written */
    if (SUCCESS == write_state.retval && write_state.maximum_file_size && write_state.bytes_written > write_state.maximum_file_size) {
        return TOO_LARGE_FILE;
    }
    return write_state.retval;
}
/* Write a 32-bit RGBA PNG (the non-quantized fallback) to outfile.
 * Uses best compression (fast_compression = 0). */
pngquant_error rwpng_write_image24(FILE *outfile, const png24_image *mainprog_ptr)
{
    png_structp png_ptr;
    png_infop info_ptr;
    pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, 0);
    if (retval) return retval;
    png_init_io(png_ptr, outfile);
    rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color);
    /* 8 bits per sample, RGBA, no interlace */
    png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
        8, PNG_COLOR_TYPE_RGB_ALPHA,
        0, PNG_COMPRESSION_TYPE_DEFAULT,
        PNG_FILTER_TYPE_BASE);
    /* NOTE(review): the row-pointer allocation is not checked for NULL
     * before use, and SUCCESS is returned unconditionally -- confirm write
     * failures are reported through the longjmp error path instead */
    png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);
    rwpng_write_end(&info_ptr, &png_ptr, row_pointers);
    free(row_pointers);
    return SUCCESS;
}
/* libpng error callback: print the message, recover the per-image state via
 * the error pointer, and longjmp back to the setjmp() site armed earlier. */
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg)
{
    rwpng_png_image *mainprog_ptr;
    /* This function, aside from the extra step of retrieving the "error
     * pointer" (below) and the fact that it exists within the application
     * rather than within libpng, is essentially identical to libpng's
     * default error handler. The second point is critical: since both
     * setjmp() and longjmp() are called from the same code, they are
     * guaranteed to have compatible notions of how big a jmp_buf is,
     * regardless of whether _BSD_SOURCE or anything else has (or has not)
     * been defined. */
    fprintf(stderr, " error: %s (libpng failed)\n", msg);
    fflush(stderr);
    mainprog_ptr = png_get_error_ptr(png_ptr);
    /* without a jmp target there is no way to recover -- bail out hard */
    if (mainprog_ptr == NULL) abort();
    longjmp(mainprog_ptr->jmpbuf, 1);
}
|
GB_unop__identity_bool_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_uint64)
// op(A') function: GB (_unop_tran__identity_bool_uint64)
// C type: bool
// A type: uint64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with a uint64_t -> bool
// typecast to all entries of A. Auto-generated kernel; code left as-is.
GrB_Info GB (_unop_apply__identity_bool_uint64)
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // sparse/hypersparse/full case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            bool z = (bool) aij ;   // cast uint64_t -> bool
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // entry not present in the bitmap
            uint64_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast uint64_t -> bool, and apply the
// identity operator. Auto-generated kernel; code left as-is.
GrB_Info GB (_unop_tran__identity_bool_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template below expands using the GB_* macros
    // defined earlier in this file
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
csr_matvec_oomp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "_hypre_utilities.hpp"
#if defined(HYPRE_USING_DEVICE_OPENMP)
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
/* Device (OpenMP target offload) CSR matvec: y = alpha*op(A)*x + beta*b,
 * computed out of place (b may equal y). op(A) = A' when trans != 0.
 * Only offset == 0 is supported. Returns hypre_error_flag. */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlaceOOMP( HYPRE_Int        trans,
                                     HYPRE_Complex    alpha,
                                     hypre_CSRMatrix *A,
                                     hypre_Vector    *x,
                                     HYPRE_Complex    beta,
                                     hypre_Vector    *b,
                                     hypre_Vector    *y,
                                     HYPRE_Int        offset )
{
   HYPRE_Int      A_nrows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      A_ncols = hypre_CSRMatrixNumCols(A);
   HYPRE_Int      A_nnz   = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Complex *A_data  = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i     = hypre_CSRMatrixI(A) + offset;
   HYPRE_Int     *A_j     = hypre_CSRMatrixJ(A);
   HYPRE_Int      y_size  = hypre_VectorSize(y) - offset;
   HYPRE_Complex *x_data  = hypre_VectorData(x);
   HYPRE_Complex *b_data  = hypre_VectorData(b) + offset;
   HYPRE_Complex *y_data  = hypre_VectorData(y) + offset;
   HYPRE_Int      i;
#ifdef HYPRE_USING_CUSPARSE
   cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle());
   cusparseMatDescr_t descr = hypre_HandleCusparseMatDescr(hypre_handle());
#endif
   //hypre_CSRMatrixPrefetch(A, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(b, HYPRE_MEMORY_DEVICE);
   //if (b != y)
   //{
   //   hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
   //}
   /* seed y with b on the device, unless they alias */
   if (b != y)
   {
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, b_data)
      for (i = 0; i < y_size; i++)
      {
         y_data[i] = b_data[i];
      }
   }
   if (x == y)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR::x and y are the same pointer in hypre_CSRMatrixMatvecDevice\n");
   }
   // TODO: offsets other than zero are not supported yet
   if (offset != 0)
   {
      hypre_printf("WARNING:: Offset is not zero in hypre_CSRMatrixMatvecDevice :: \n");
   }
   hypre_assert(offset == 0);
   if (trans)
   {
      /* Transposed product: convert A (CSR) to CSC so A' can be applied as a
       * non-transposed sweep over the columns of A.
       * NOTE(review): the conversion below requires cuSPARSE; without
       * HYPRE_USING_CUSPARSE the csc_* arrays are never filled -- confirm
       * this path is only compiled with cuSPARSE enabled. */
      HYPRE_Complex *csc_a = hypre_TAlloc(HYPRE_Complex, A->num_nonzeros, HYPRE_MEMORY_DEVICE);
      HYPRE_Int     *csc_j = hypre_TAlloc(HYPRE_Int,     A->num_nonzeros, HYPRE_MEMORY_DEVICE);
      HYPRE_Int     *csc_i = hypre_TAlloc(HYPRE_Int,     A->num_cols+1,   HYPRE_MEMORY_DEVICE);
      HYPRE_CUSPARSE_CALL( cusparseDcsr2csc(handle, A->num_rows, A->num_cols, A->num_nonzeros,
                                            A->data, A->i, A->j, csc_a, csc_j, csc_i,
                                            CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO) );
#ifdef HYPRE_USING_CUSPARSE
      HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                          A->num_cols, A->num_rows, A->num_nonzeros,
                                          &alpha, descr,
                                          csc_a, csc_i, csc_j,
                                          x->data, &beta, y->data) );
#else
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(csc_a, csc_i, csc_j, y_data, x_data)
      for (i = 0; i < A_ncols; i++)
      {
         HYPRE_Complex tempx = 0.0;
         HYPRE_Int j;
         for (j = csc_i[i]; j < csc_i[i+1]; j++)
         {
            tempx += csc_a[j] * x_data[csc_j[j]];
         }
         y_data[i] = alpha*tempx + beta*y_data[i];
      }
#endif
      hypre_TFree(csc_a, HYPRE_MEMORY_DEVICE);
      hypre_TFree(csc_i, HYPRE_MEMORY_DEVICE);
      hypre_TFree(csc_j, HYPRE_MEMORY_DEVICE);
   }
   else
   {
#ifdef HYPRE_USING_CUSPARSE
      HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                          A_nrows, A_ncols, A_nnz,
                                          &alpha, descr,
                                          A_data, A_i, A_j,
                                          x_data, &beta, y_data) );
#else
      /* one row per iteration; fix: loop bound previously referenced the
       * undeclared identifier A_num_rows instead of A_nrows */
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(A_data, A_i, A_j, y_data, x_data)
      for (i = 0; i < A_nrows; i++)
      {
         HYPRE_Complex tempx = 0.0;
         HYPRE_Int j;
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            tempx += A_data[j] * x_data[A_j[j]];
         }
         y_data[i] = alpha*tempx + beta*y_data[i];
      }
#endif
   }
   return hypre_error_flag;
}
#endif /* #if defined(HYPRE_USING_DEVICE_OPENMP) */
|
regul.h | #ifndef REGUL_H
#define REGUL_H
#include "linalg.h"
// Header-scope timers: 'static' gives each translation unit its own copy,
// preventing duplicate-symbol link errors / ODR violations when this header
// is included from several .cpp files (the previous non-static definitions
// would be multiply defined).
static Timer timer_global, timer_global2, timer_global3;
enum regul_t { L2, L1, ELASTICNET, L1BALL, L2BALL, FUSEDLASSO, L1L2, L1LINF, NONE, INCORRECT_REG };
/// True for penalties that operate on whole matrices (group norms applied
/// across rows or columns) rather than on a single vector.
static bool is_regul_for_matrices(const regul_t& reg) {
   switch (reg) {
      case L1L2:
      case L1LINF:
         return true;
      default:
         return false;
   }
}
/// Bundle of loss/regularizer settings for a model.
template <typename T> struct ParamModel {
   ParamModel() { regul=NONE; lambda=0; lambda2=0; lambda3=0; intercept=false; loss=SQUARE; };
   loss_t loss;      // data-fitting term; defaults to SQUARE (loss_t declared elsewhere)
   regul_t regul;    // penalty type
   T lambda;         // main regularization weight
   T lambda2;        // secondary weight (elastic-net L2 part, fused-lasso L1 part)
   T lambda3;        // tertiary weight (fused-lasso ridge part)
   bool intercept;   // if true, the last coefficient is an unpenalized intercept
};
/// Simplify a ParamModel in place: configurations with zero weights are
/// collapsed to the simplest equivalent penalty (order of the checks below
/// matters, since lambda is reassigned along the way).
template <typename T>
void clean_param_model(ParamModel<T>& param) {
   // fused-lasso with no TV weight degenerates to elastic-net; shift the
   // remaining weights down (lambda <- lambda2, lambda2 <- lambda3)
   if (param.regul==FUSEDLASSO && param.lambda==0) {
      param.regul=ELASTICNET;
      param.lambda=param.lambda2;
      param.lambda2=param.lambda3;
   };
   if (param.regul==ELASTICNET) {
      // degenerate elastic-net: pure L2, pure L1, or no penalty at all
      if (param.lambda==0) { param.regul=L2; param.lambda=param.lambda2; };
      if (param.lambda2==0) param.regul=L1;
      if (param.lambda==0 && param.lambda2==0) param.regul=NONE;
   } else {
      if (param.lambda==0)
         param.regul=NONE;
   }
}
/// Map a command-line penalty name to its regul_t value;
/// unknown names yield INCORRECT_REG.
static regul_t regul_from_string(char* regul) {
   static const struct { const char* name; regul_t reg; } lookup[] = {
      { "l1",          L1 },
      { "l1-ball",     L1BALL },
      { "fused-lasso", FUSEDLASSO },
      { "l2",          L2 },
      { "l2-ball",     L2BALL },
      { "elastic-net", ELASTICNET },
      { "l1l2",        L1L2 },
      { "l1linf",      L1LINF },
      { "none",        NONE },
   };
   for (const auto& entry : lookup)
      if (strcmp(regul, entry.name) == 0) return entry.reg;
   return INCORRECT_REG;
}
/// Abstract interface for a (possibly non-smooth) regularization penalty.
/// D is the variable type (e.g. Vector<T> or Matrix<T>); I is the index
/// type used by the lazy/sparse update variants.
template <typename D, typename I>
class Regularizer {
public:
   typedef typename D::value_type T;
   typedef I index_type;
   Regularizer(const ParamModel<T>& model) : _intercept(model.intercept), _id(model.regul) { };
   virtual ~Regularizer() { };
   virtual void prox(const D& input, D& output, const T eta) const = 0; // should be able to do inplace with output=input
   // value of the penalty at input
   virtual T eval(const D& input) const = 0;
   // Fenchel-conjugate term used for duality gaps; implementations may
   // rescale grad1 in place to make the dual point feasible
   virtual T fenchel(D& grad1, D& grad2) const = 0;
   virtual void print() const = 0;
   // lazy variants touch only the coordinates listed in `indices`
   virtual bool is_lazy() const { return false; };
   virtual void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const { };
   virtual bool provides_fenchel() const { return true; };
   virtual regul_t id() const { return _id;};
   virtual bool intercept() const { return _intercept;};
   virtual T strong_convexity() const { return 0; }; // 0 unless the penalty is strongly convex
   virtual T lambda() const { return 0;};
protected:
   const bool _intercept; // if true, the last coordinate is left unpenalized
private:
   // non-copyable: copy operations declared but never defined
   explicit Regularizer<D,I>(const Regularizer<D,I>& reg);
   Regularizer<D,I>& operator=(const Regularizer<D,I>& reg);
   const regul_t _id;
};
/// The "no penalty" regularizer: psi(x) = 0 everywhere, so prox is the
/// identity map and eval is identically zero.
template <typename D, typename I>
class None final : public Regularizer<D,I> {
public:
   typedef typename D::value_type T;
   None(const ParamModel<T>& model) : Regularizer<D,I>(model) { };
   // prox of the zero function simply forwards its argument
   virtual void prox(const D& u, D& v, const T eta) const { v.copy(u); };
   inline T eval(const D& u) const { return 0; };
   inline T fenchel(D& g1, D& g2) const { return 0; };
   // no duality-gap information is available for the zero penalty
   bool provides_fenchel() const { return false; };
   void print() const { cout << "No regularization" << endl; }
};
/// Ridge penalty: psi(w) = 0.5 * lambda * ||w||_2^2, with the intercept
/// (last coordinate) excluded when present.
template <typename D, typename I>
class Ridge final : public Regularizer<D,I> {
public:
   typedef typename D::value_type T;
   Ridge(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda) { };
   /// prox_{eta*psi}(u) = u / (1 + lambda*eta); intercept passes through.
   inline void prox(const D& input, D& output, const T eta) const {
      output.copy(input);
      output.scal(T(1.0/(1.0+_lambda*eta)));
      if (this->_intercept) {
         const int n = input.n();
         output[n-1]=input[n-1];   // undo the shrinkage on the intercept
      }
   };
   inline T eval(const D& input) const {
      const int n = input.n();
      const T res = input.nrm2sq();
      return (this->_intercept ? T(0.5)*_lambda*(res - input[n-1]*input[n-1]) : T(0.5)*_lambda*res);
   };
   /// Conjugate: ||g||^2 / (2*lambda); infinite if the intercept coordinate
   /// carries a non-negligible gradient.
   inline T fenchel(D& grad1, D& grad2) const {
      return (this->_intercept & (abs<T>(grad2[grad2.n()-1]) >
               1e-6)) ? INFINITY : this->eval(grad2)/(_lambda*_lambda);
   };
   void print() const {
      cout << "L2 regularization" << endl;
   }
   virtual T strong_convexity() const { return this->_intercept ? 0 : _lambda; };
   virtual T lambda() const { return _lambda; };
   /// Same prox, restricted to the coordinates listed in `indices`.
   inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const {
      const T scal = T(1.0)/(T(1.0)+_lambda*eta);
      const int p = input.n();
      const int r = indices.n();
      for (int jj=0; jj<r; ++jj)
         output[indices[jj]]=scal*input[indices[jj]];
      if (this->_intercept) output[p-1]=input[p-1];
   };
   virtual bool is_lazy() const { return true; };
private:
   const T _lambda;
};
/// Lasso penalty: psi(w) = lambda * ||w||_1, intercept excluded when present.
template <typename D, typename I>
class Lasso final : public Regularizer<D,I> {
public:
   typedef typename D::value_type T;
   Lasso(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda) { };
   /// Soft-thresholding with threshold eta*lambda; the intercept entry is
   /// restored untouched afterwards.
   inline void prox(const D& input, D& output, const T eta) const {
      input.fastSoftThrshold(output,eta*_lambda);
      if (this->_intercept) {
         const int n = input.n();
         output[n-1]=input[n-1];
      }
   };
   inline T eval(const D& input) const {
      const int n = input.n();
      const T res = input.asum();
      return (this->_intercept ? _lambda*(res - abs<T>(input[n-1])) : _lambda*res);
   };
   /// Conjugate of the L1 norm is the indicator of the L-inf ball of radius
   /// lambda; grad1 is rescaled in place to make the dual point feasible.
   inline T fenchel(D& grad1, D& grad2) const {
      const T mm = grad2.fmaxval();
      if (mm > _lambda)
         grad1.scal(_lambda/mm);
      // finite only if the intercept carries (near) zero gradient
      return (this->_intercept & (abs<T>(grad2[grad2.n()-1]) >
               1e-6)) ? INFINITY : 0;
   };
   void print() const {
      cout << "L1 regularization" << endl;
   }
   virtual T lambda() const { return _lambda;};
   /// Coordinate-restricted soft-thresholding for lazy/sparse updates.
   inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const {
      const int p = input.n();
      const int r = indices.n();
      const T thrs=_lambda*eta;
      //#pragma omp parallel for
      for (int jj=0; jj<r; ++jj)
         output[indices[jj]]=fastSoftThrs(input[indices[jj]],thrs);;
      if (this->_intercept) output[p-1]=input[p-1];
   };
   virtual bool is_lazy() const { return true; };
private:
   const T _lambda;
};
/// Elastic-net penalty: psi(w) = lambda*||w||_1 + 0.5*lambda2*||w||_2^2,
/// intercept excluded when present.
template <typename D, typename I>
class ElasticNet final : public Regularizer<D,I> {
public:
   typedef typename D::value_type T;
   // Derivation of the prox used below:
   // min_x 0.5|y-x|^2 + lambda_1 |x| + 0.5 lambda_2 x^2
   // min_x - y x + 0.5 x^2 + lambda_1 |x| + 0.5 lambda_2 x^2
   // min_x - y x + 0.5 (1+lambda_2) x^2 + lambda_1 |x|
   // min_x - y/(1+lambda2) x + 0.5 x^2 + lambda_1/(1+lambda2) |x|
   ElasticNet(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda), _lambda2(model.lambda2) {
   };
   /// Soft-threshold by eta*lambda, then shrink by 1/(1+eta*lambda2).
   inline void prox(const D& input, D& output, const T eta) const {
      output.copy(input);
      output.fastSoftThrshold(_lambda*eta);
      output.scal(T(1.0)/(1+_lambda2*eta));
      if (this->_intercept) {
         const int n = input.n();
         output[n-1]=input[n-1];   // the intercept is not penalized
      }
   };
   inline T eval(const D& input) const {
      const int n = input.n();
      const T res = _lambda*input.asum() + T(0.5)*_lambda2*input.nrm2sq();
      return (this->_intercept ? res - _lambda*abs<T>(input[n-1]) - T(0.5)*_lambda2*input[n-1]*input[n-1] : res);
   };
   // Conjugate derivation:
   // max_x xy - lambda_1 |x| - 0.5 lambda_2 x^2
   // - min_x - xy + lambda_1 |x| + 0.5 lambda_2 x^2
   // -(1/lambda2) min_x - xy/lambda2 + lambda_1/lambda_2 |x| + 0.5 x^2
   // x^* = prox_(l1 lambda_1/lambda_2) [ y/lambda2]
   // x^* = prox_(l1 lambda_1) [ y] /_lambda2
   /// NOTE: grad2 is soft-thresholded in place as a side effect.
   inline T fenchel(D& grad1, D& grad2) const {
      D tmp;
      tmp.copy(grad2);
      grad2.fastSoftThrshold(_lambda);
      const int n = grad2.n();
      T res0 = _lambda*grad2.asum()/_lambda2 + T(0.5)*grad2.nrm2sq()/_lambda2;
      if (this->_intercept) res0 -= _lambda*abs<T>(grad2[n-1])/_lambda2 - T(0.5)*grad2[n-1]*grad2[n-1]/_lambda2;
      const T res = tmp.dot(grad2)/_lambda2 - res0;
      // finite only if the intercept carries (near) zero gradient
      return (this->_intercept & (abs<T>(tmp[tmp.n()-1]) >
               1e-6)) ? INFINITY : res;
   };
   void print() const {
      cout << "Elastic Net regularization" << endl;
   }
   virtual T strong_convexity() const { return this->_intercept ? 0 : _lambda2; };
   virtual T lambda() const { return _lambda;};
   /// Coordinate-restricted prox for lazy/sparse updates.
   inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const {
      const int p = input.n();
      const int r = indices.n();
      const T thrs=_lambda*eta;
      const T scal = T(1.0)/(T(1.0)+_lambda2*eta);
#pragma omp parallel for
      for (int jj=0; jj<r; ++jj)
         output[indices[jj]]=scal*fastSoftThrs(input[indices[jj]],thrs);;
      if (this->_intercept) output[p-1]=input[p-1];
   };
   virtual bool is_lazy() const { return true; };
private:
   const T _lambda;
   const T _lambda2;
};
/// Constraint-style penalty: indicator of the L1 ball of radius _lambda.
/// eval() returns 0; the constraint is enforced only through prox.
template <typename D, typename I>
class L1Ball final : public Regularizer<D,I> {
public:
   typedef typename D::value_type T;
   L1Ball(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda) { };
   /// Projection onto { w : ||w||_1 <= lambda }. When an intercept is
   /// present it is zeroed before projecting and copied back afterwards.
   inline void prox(const D& input, D& output, const T eta) const {
      D tmp;
      tmp.copy(input);
      if (this->_intercept) {
         tmp[tmp.n()-1]=0;
         tmp.sparseProject(output,_lambda,1,0,0,0,false);
         output[output.n()-1] = input[output.n()-1];
      } else {
         tmp.sparseProject(output,_lambda,1,0,0,0,false);
      }
   };
   inline T eval(const D& input) const { return 0; };   // indicator: 0 when feasible
   /// Support function of the ball: lambda * ||grad2||_inf (intercept excluded).
   inline T fenchel(D& grad1, D& grad2) const {
      Vector<T> output;
      output.copy(grad2);
      if (this->_intercept) output[output.n()-1]=0;
      return _lambda*(output.fmaxval());
   };
   void print() const {
      cout << "L1 ball regularization" << endl;
   }
   virtual T lambda() const { return _lambda;};
private:
   const T _lambda;
};
/// Constraint-style penalty: indicator of the L2 ball of radius _lambda.
/// eval() returns 0; the constraint is enforced only through prox.
template <typename D, typename I>
class L2Ball final : public Regularizer<D,I> {
public:
   typedef typename D::value_type T;
   L2Ball(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda) { };
   /// Euclidean projection of input onto { w : ||w||_2 <= lambda }. The
   /// intercept (last coordinate), when present, is excluded from the norm
   /// and copied through unchanged.
   /// Fix: the previous implementation rescaled a local temporary but never
   /// stored the projected vector into `output`, leaving `output` unset
   /// (except for the intercept entry).
   inline void prox(const D& input, D& output, const T eta) const {
      D tmp;
      tmp.copy(input);
      if (this->_intercept) tmp[tmp.n()-1]=0;  // do not project the intercept
      const T nrm = tmp.nrm2();
      if (nrm > _lambda)
         tmp.scal(_lambda/nrm);                // rescale onto the sphere
      output.copy(tmp);
      if (this->_intercept) output[output.n()-1] = input[input.n()-1];
   };
   inline T eval(const D& input) const { return 0; };   // indicator: 0 when feasible
   /// Support function of the ball: lambda * ||grad2||_2 (intercept excluded).
   inline T fenchel(D& grad1, D& grad2) const {
      Vector<T> output;
      output.copy(grad2);
      if (this->_intercept) output[output.n()-1]=0;
      return _lambda*(output.nrm2());
   };
   void print() const {
      // fix: this class previously printed "L1 ball regularization"
      cout << "L2 ball regularization" << endl;
   }
   virtual T lambda() const { return _lambda;};
private:
   const T _lambda;
};
/// Fused lasso: lambda*sum|x[i+1]-x[i]| + lambda2*||x||_1 + 0.5*lambda3*||x||_2^2.
template <typename D, typename I>
class FusedLasso final : public Regularizer<D,I> {
public:
   typedef typename D::value_type T;
   FusedLasso(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda), _lambda2(model.lambda2), _lambda3(model.lambda3) { };
   /// Prox computed by the homotopy algorithm in fusedProjectHomotopy.
   /// NOTE(review): eta is not used here -- confirm callers always pass eta == 1.
   inline void prox(const D& x, D& output, const T eta) const {
      output.resize(x.n());
      Vector<T> copyx;
      copyx.copy(x);
      copyx.fusedProjectHomotopy(output,_lambda2,_lambda,_lambda3,true);
   };
   inline T eval(const D& x) const {
      T sum = T();
      const int maxn = this->_intercept ? x.n()-1 : x.n();
      for (int i = 0; i<maxn-1; ++i)
         sum += _lambda*abs(x[i+1]-x[i]) + _lambda2*abs(x[i]) + T(0.5)*_lambda3*x[i]*x[i];
      // last penalized coordinate: no TV (difference) term
      sum += _lambda2*abs(x[maxn-1])+0.5*_lambda3*x[maxn-1]*x[maxn-1];
      return sum;
   };
   inline T fenchel(D& grad1, D& grad2) const { return 0; };  // no conjugate available
   void print() const {
      cout << "Fused Lasso regularization" << endl;
   }
   bool provides_fenchel() const { return false; };
   virtual T strong_convexity() const { return this->_intercept ? 0 : _lambda3; };
   virtual T lambda() const { return _lambda;};
private:
   const T _lambda;
   const T _lambda2;
   const T _lambda3;
};
/// Applies an inner regularizer Reg independently to each of the _N columns
/// of a matrix variable (or to each row when _transpose is set).
template <typename Reg>
class RegMat final : public Regularizer< Matrix<typename Reg::T>, typename Reg::index_type > {
public:
   typedef typename Reg::T T;
   typedef typename Reg::index_type I;
   RegMat(const ParamModel<T>& model, const int num_cols, const bool transpose) : Regularizer< Matrix<T>, I >(model), _N(num_cols), _transpose(transpose) {
      _regs=new Reg*[_N];   // one inner regularizer per column/row
      for (int i = 0; i<_N; ++i)
         _regs[i]=new Reg(model);
   };
   virtual ~RegMat() {
      for (int i = 0; i<_N; ++i) {
         delete(_regs[i]);
         _regs[i]=NULL;
      }
      delete[](_regs);
   };
   /// Column-/row-wise prox. Columns are referenced in place (refCol);
   /// rows must be copied out and written back (copyRow/copyToRow).
   void inline prox(const Matrix<T>& x, Matrix<T>& y, const T eta) const {
      y.copy(x);
      int i;
#pragma omp parallel for private(i)
      for (i = 0; i<_N; ++i) {
         Vector<T> colx, coly;
         if (_transpose) {
            x.copyRow(i,colx);
            y.copyRow(i,coly);
         } else {
            x.refCol(i,colx);
            y.refCol(i,coly);
         }
         _regs[i]->prox(colx,coly,eta);
         if (_transpose)
            y.copyToRow(i,coly);   // write the result back into the row
      }
   };
   /// Sum of the inner penalties over all columns/rows.
   T inline eval(const Matrix<T>& x) const {
      T sum = 0;
#pragma omp parallel for reduction(+ : sum)
      for (int i = 0; i<_N; ++i) {
         Vector<T> col;
         if (_transpose) {
            x.copyRow(i,col);
         } else {
            x.refCol(i,col);
         }
         const T val = _regs[i]->eval(col);
         sum += val;
      }
      return sum;
   };
   /// Sum of the inner Fenchel terms; gradients may be rescaled in place by
   /// the inner regularizers, hence the write-back for the row case.
   T inline fenchel(Matrix<T>& grad1, Matrix<T>& grad2) const {
      T sum=0;
#pragma omp parallel for reduction(+ : sum)
      for (int i = 0; i<_N; ++i) {
         Vector<T> col1, col2;
         if (_transpose) {
            grad1.copyRow(i,col1);
            grad2.copyRow(i,col2);
         } else {
            grad1.refCol(i,col1);
            grad2.refCol(i,col2);
         }
         const T fench=_regs[i]->fenchel(col1,col2);
         sum+=fench;
         if (_transpose) {
            grad1.copyToRow(i,col1);
            grad2.copyToRow(i,col2);
         }
      }
      return sum;
   };
   /// Fenchel information is available only if every inner reg provides it.
   virtual bool provides_fenchel() const {
      bool ok=true;
      for (int i = 0; i<_N; ++i)
         ok = ok && _regs[i]->provides_fenchel();
      return ok;
   };
   void print() const {
      cout << "Regularization for matrices" << endl;
      _regs[0]->print();
   };
   virtual T lambda() const { return _regs[0]->lambda();};
   /// Coordinate-restricted prox, delegated per column/row.
   inline void lazy_prox(const Matrix<T>& input, Matrix<T>& output, const Vector<I>& indices, const T eta) const {
#pragma omp parallel for
      for (int i = 0; i<_N; ++i) {
         Vector<T> colx, coly;
         output.refCol(i,coly);
         if (_transpose) {
            input.copyRow(i,colx);
         } else {
            input.refCol(i,colx);
         }
         _regs[i]->lazy_prox(colx,coly,indices,eta);
      }
   };
   virtual bool is_lazy() const { return _regs[0]->is_lazy(); };
protected:
   int _N;          // number of independent columns/rows
   Reg** _regs;     // owned; one per column/row
   bool _transpose; // operate on rows instead of columns
};
/// TODO: write a more generic class for [W b] with vec(W)
/// Treats a matrix variable as [W b]: W is vectorized and penalized by the
/// inner regularizer Reg, while the last column b (the intercepts, when
/// model.intercept is set) is left unpenalized.
template <typename Reg>
class RegVecToMat final : public Regularizer< Matrix<typename Reg::T>, typename Reg::index_type > {
public:
   typedef typename Reg::T T;
   typedef typename Reg::index_type I;
   typedef Matrix<T> D;
   RegVecToMat(const ParamModel<T>& model) : Regularizer<D,I>(model), _intercept(model.intercept) {
      ParamModel<T> model2=model;
      model2.intercept=false;   // the inner reg never sees the intercept column
      _reg=new Reg(model2);
   };
   ~RegVecToMat() { delete(_reg);};
   inline void prox(const D& input, D& output, const T eta) const {
      Vector<T> w1, w2, b1, b2;
      output.resize(input.m(),input.n());
      get_wb(input,w1,b1);
      get_wb(output,w2,b2);
      _reg->prox(w1,w2,eta);
      if (_intercept) b2.copy(b1);   // intercepts pass through unchanged
   };
   inline T eval(const D& input) const {
      Vector<T> w, b;
      get_wb(input,w,b);
      return _reg->eval(w);
   }
   inline T fenchel(D& grad1, D& grad2) const {
      Vector<T> g1;
      grad1.toVect(g1);
      Vector<T> w, b;
      get_wb(grad2,w,b);
      // finite only if the intercept block carries (near) zero gradient
      return (this->_intercept && ((b.nrm2sq()) > 1e-7) ? INFINITY : _reg->fenchel(g1,w));
   };
   void print() const {
      _reg->print();
   }
   virtual T strong_convexity() const {
      return _intercept ? 0 : _reg->strong_convexity();
   };
   virtual T lambda() const { return _reg->lambda(); };
   inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const {
      Vector<T> w1, w2, b1, b2;
      output.resize(input.m(),input.n());
      get_wb(input,w1,b1);
      get_wb(output,w2,b2);
      _reg->lazy_prox(w1,w2,indices,eta);
      if (_intercept) b2.copy(b1);
   };
   virtual bool is_lazy() const { return _reg->is_lazy(); };
private:
   /// Split [W b]: w becomes a vectorized view of the leading columns; b
   /// (only when _intercept) references the last column.
   /// NOTE(review): the non-intercept branch calls refSubMat(0,p,W) while
   /// the intercept branch uses p-1 -- confirm refSubMat's second argument
   /// is a column count rather than a last index.
   inline void get_wb(const Matrix<T>& input, Vector<T>& w, Vector<T>& b) const {
      const int p = input.n();
      Matrix<T> W;
      if (_intercept) {
         input.refSubMat(0,p-1,W);
         input.refCol(p-1,b);
      } else {
         input.refSubMat(0,p,W);
      }
      W.toVect(w);
   };
   Reg* _reg;               // owned inner regularizer (intercept disabled)
   const bool _intercept;   // NOTE: shadows Regularizer::_intercept
};
/// Norm policy used by MixedL1LN: the (non-squared) Euclidean norm.
/// Its dual norm is also the L2 norm.
template <typename T>
struct normL2 {
   typedef T value_type;
   /// Group soft-thresholding: shrink the whole vector toward zero by thrs,
   /// zeroing it entirely when its norm is below the threshold.
   static inline void prox(Vector<T>& x, const T thrs) {
      const T norm_x = x.nrm2();
      if (norm_x <= thrs) {
         x.setZeros();                   // the whole group is eliminated
      } else {
         x.scal((norm_x-thrs)/norm_x);   // uniform shrinkage of the group
      }
   };
   static inline T eval(const Vector<T>& x) {
      return x.nrm2();
   };
   static inline T eval_dual(const Vector<T>& x) {
      return x.nrm2();
   };
   static inline void print() {
      cout << "L2";
   };
};
template <typename T>
struct normLinf {
typedef T value_type;
static inline void prox(Vector<T>& x, const T thrs) {
Vector<T> z;
x.l1project(z,thrs);
x.sub(z);
};
static inline T eval(const Vector<T>& x) {
return x.fmaxval();
};
static inline void print() {
cout << "LInf";
};
static inline T eval_dual(const Vector<T>& x) {
return x.asum();
};
};
/// Mixed L1/N ("group") norm: _lambda times the sum, over rows (or columns
/// when _transpose), of the inner norm N (a policy struct such as normL2 or
/// normLinf). The intercept row/column, when present, is skipped.
template <typename N, typename I>
class MixedL1LN final : public Regularizer< Matrix<typename N::value_type>, I > {
public:
   typedef typename N::value_type T;
   typedef Matrix<T> D;
   // NOTE(review): the nclass argument is accepted but never used --
   // confirm it is retained only for constructor-signature compatibility.
   MixedL1LN(const ParamModel<T>& model, const int nclass, const bool transpose) :
      Regularizer<D,I>(model), _transpose(transpose), _lambda(model.lambda) { };
   /// Group prox applied to every column (transpose) or row (otherwise).
   inline void prox(const D& x, D& y, const T eta) const {
      const T thrs=_lambda*eta;
      const int n = x.n();
      const int m = x.m();
      y.copy(x);
      if (_transpose) {
         const int nn = this->_intercept ? n-1 : n;   // skip intercept column
#pragma omp parallel for
         for (int i = 0; i<nn; ++i) {
            Vector<T> col;
            y.refCol(i,col);
            N::prox(col,thrs);
         }
      } else {
         const int nn = this->_intercept ? m-1 : m;   // skip intercept row
#pragma omp parallel for
         for (int i = 0; i<nn; ++i) {
            Vector<T> row;
            y.copyRow(i,row);    // rows cannot be referenced in place
            N::prox(row,thrs);
            y.copyToRow(i,row);
         }
      }
   };
   /// _lambda * sum of inner norms over the penalized groups.
   T inline eval(const D& x) const {
      T sum=0;
      const int n = x.n();
      const int m = x.m();
      if (_transpose) {
         const int nn = this->_intercept ? n-1 : n;
#pragma omp parallel for reduction(+ : sum)
         for (int i = 0; i<nn; ++i) {
            Vector<T> col;
            x.refCol(i,col);
            sum+=N::eval(col);
         }
      } else {
         const int nn = this->_intercept ? m-1 : m;
#pragma omp parallel for reduction(+:sum)
         for (int i = 0; i<nn; ++i) {
            Vector<T> row;
            x.copyRow(i,row);
            sum+=N::eval(row);
         }
      }
      return _lambda*sum;
   }
   // grad1 is nclasses * n
   /// Dual feasibility: grad1 is rescaled so the largest group dual norm is
   /// at most _lambda; returns INFINITY if the intercept group carries a
   /// non-negligible gradient, 0 otherwise.
   inline T fenchel(D& grad1, D& grad2) const {
      const int n = grad2.n();
      const int m = grad2.m();
      T res=0;
      T mm=0;
      if (_transpose) {
         const int nn = this->_intercept ? n-1 : n;
         for (int i = 0; i<nn; ++i) {
            Vector<T> col;
            grad2.refCol(i,col);
            mm = MAX(N::eval_dual(col),mm);
         }
         Vector<T> col;
         if (this->_intercept) {
            grad2.refCol(nn,col);
            if (col.nrm2sq() > T(1e-7)) res=INFINITY;
         }
      } else {
         const int nn = this->_intercept ? m-1 : m;
         for (int i = 0; i<nn; ++i) {
            Vector<T> row;
            grad2.copyRow(i,row);
            mm = MAX(N::eval_dual(row),mm);
         }
         Vector<T> col;
         if (this->_intercept) {
            grad2.copyRow(nn,col);
            if (col.nrm2sq() > T(1e-7)) res=INFINITY;
         }
      }
      if (mm > _lambda)
         grad1.scal(_lambda/mm);
      return res;
   };
   void print() const {
      cout << "Mixed L1-";
      N::print();
      cout << " norm regularization" << endl;
   }
   inline T lambda() const { return _lambda; };
   /// Group prox restricted to the groups listed in `indices`; the intercept
   /// group, when present, is copied through unchanged.
   inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const {
      output.resize(input.m(),input.n());
      const int r = indices.n();
      const T thrs=_lambda*eta;
      const int m = input.m();
      const int n = input.n();
      if (_transpose) {
#pragma omp parallel for
         for (int i = 0; i<r; ++i) {
            const int ind=indices[i];
            Vector<T> col, col1;
            input.refCol(ind,col1);
            output.refCol(ind,col);
            col.copy(col1);
            N::prox(col,thrs);
         }
         if (this->_intercept) {
            Vector<T> col, col1;
            input.refCol(n-1,col1);
            output.refCol(n-1,col);
            col.copy(col1);
         }
      } else {
#pragma omp parallel for
         for (int i = 0; i<r; ++i) {
            const int ind=indices[i];
            Vector<T> col;
            input.copyRow(ind,col);
            N::prox(col,thrs);
            output.copyToRow(ind,col);
         }
         if (this->_intercept) {
            Vector<T> col;
            input.copyRow(m-1,col);
            output.copyToRow(m-1,col);
         }
      }
   };
   virtual bool is_lazy() const { return true; };
private:
   const bool _transpose;   // groups are columns when true, rows otherwise
   const T _lambda;
};
// Convenience aliases: mixed group norms summing the L2 (resp. L-inf) norm
// of each row/column, as implemented by MixedL1LN.
template <typename T, typename I>
using MixedL1L2=MixedL1LN< normL2<T>, I >;
template <typename T, typename I>
using MixedL1Linf=MixedL1LN< normLinf<T> , I>;
#endif
|
atomicConstructSimple.c | int main() {
    /* OpenMP construct test case (left functionally untouched on purpose). */
    int x = 0;
    /* First region: the shared update of x is commented out, so each thread
     * only initializes its private localX. The `critical` here is followed
     * by a bare `flush` directive.
     * NOTE(review): `flush` is a stand-alone directive; confirm that using
     * it as the body of `critical` is conforming OpenMP. */
#pragma omp parallel
    {
        int localX = 10;
#pragma omp critical
#pragma omp flush
        //#pragma omp atomic update
        //   x += localX;
    }
    /* Second region: x += localX is guarded both by `critical` and by
     * `atomic update` -- redundant, but race-free. */
#pragma omp parallel
    {
        int localX = 10;
#pragma omp critical
#pragma omp atomic update
        x += localX;
    }
}
|
GB_binop__bset_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint16)
// C=scalar+B GB (_bind1st__bset_uint16)
// C=scalar+B' GB (_bind1st_tran__bset_uint16)
// C=A+scalar GB (_bind2nd__bset_uint16)
// C=A'+scalar GB (_bind2nd_tran__bset_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = GB_BITSET (aij, bij, uint16_t, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, uint16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT16 || GxB_NO_BSET_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled: BSET is not one of the accumulators supported by this kernel,
// so the generator emitted only this "(none)" placeholder under #if 0.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the BITSET operator where C, A, and B are all dense;
// the actual loop body comes from the included template.
void GB (_Cdense_ewise3_noaccum__bset_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix using GB_BITSET
// as the accumulator. Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__bset_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. The scalar arrives
// type-erased (GB_void*) and is cast to uint16_t here.
GrB_Info GB (_Cdense_accumb__bset_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the block above already returned); kept by the generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: colscale (C = A*D) is not generated for the BSET operator;
// only this "(none)" placeholder remains under #if 0.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: rowscale (C = D*B) is not generated for the BSET operator;
// only this "(none)" placeholder remains under #if 0.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked / mask-complemented). For eWiseUnion
// the alpha/beta scalars stand in for entries missing from A or B; they are
// only read when is_eWiseUnion is true. The numeric loops come from
// GB_add_template.c, expanding GB_BINOP (GB_BITSET) at each entry.
GrB_Info GB (_AaddB__bset_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (masked variants included) where C is sparse/hyper;
// the numeric loops come from GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bset_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// Since GB_BINOP_FLIP is 1 for BSET (non-commutative, no flipped variant),
// flipxy selects between fmult(x,y) and fmult(y,x) via GB_FLIPPED.
GrB_Info GB (_AemultB_02__bset_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B with M sparse/hyper and both A and B bitmap/full;
// the numeric loops come from GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bset_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held as a bitmap (masked variants
// included); the numeric loops come from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bset_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = BITSET (x, Bx [p]) for every entry of B, with the scalar x bound
// to the first operand; bitmap holes (Bb) are skipped. Cx and Bx may alias.
GrB_Info GB (_bind1st__bset_uint16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *out = (uint16_t *) Cx_output ;
    const uint16_t xscalar = (*((uint16_t *) x_input)) ;
    uint16_t *Bvals = (uint16_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, k)) continue ;
        uint16_t bval = GBX (Bvals, k, false) ;
        out [k] = GB_BITSET (xscalar, bval, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = BITSET (Ax [p], y) for every entry of A, with the scalar y bound
// to the second operand; bitmap holes (Ab) are skipped. Cx and Ax may alias.
GrB_Info GB (_bind2nd__bset_uint16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *out = (uint16_t *) Cx_output ;
    uint16_t *Avals = (uint16_t *) Ax_input ;
    const uint16_t yscalar = (*((uint16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, k)) continue ;
        uint16_t aval = GBX (Avals, k, false) ;
        out [k] = GB_BITSET (aval, yscalar, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, uint16_t, 16) ; \
}
// C = BITSET (x, A'): the transpose loops come from GB_unop_transpose.c,
// which expands GB_CAST_OP above at each entry.
GrB_Info GB (_bind1st_tran__bset_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, uint16_t, 16) ; \
}
// C = BITSET (A', y): the transpose loops come from GB_unop_transpose.c,
// which expands GB_CAST_OP above at each entry.
GrB_Info GB (_bind2nd_tran__bset_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
compression.h | #pragma once
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <vector>
#include <cuda_runtime.h>
namespace compression
{
/**
 * Element-wise converting copy: dst[i] = static_cast<TOut>(src[i]).
 * The loop runs in parallel when OpenMP is enabled.
 */
template<typename TOut, typename TIn>
void cast(TOut* dst, const TIn* src, ptrdiff_t numel)
{
#pragma omp parallel for
    for (ptrdiff_t idx = 0; idx < numel; ++idx)
    {
        dst[idx] = static_cast<TOut>(src[idx]);
    }
}
/**
 * Convenience overload: converts a whole vector, returning a new vector
 * of the same length.
 */
template<typename TOut, typename TIn>
std::vector<TOut> cast(const std::vector<TIn>& v)
{
    std::vector<TOut> converted(v.size());
    cast<TOut, TIn>(converted.data(), v.data(), static_cast<ptrdiff_t>(v.size()));
    return converted;
}
/**
* Specifies a slice through an axis (python slice notation)
*/
/**
 * Specifies a slice through an axis (python slice notation)
 */
struct Slice
{
    ptrdiff_t start, end, step;

    Slice(ptrdiff_t start = 0, ptrdiff_t end = -1, ptrdiff_t step = 1)
        : start(start), end(end), step(step)
    {}

    /**
     * Realizes this slice with the actual size of that dimension.
     * End-indices that start from the back (i.e. negative numbers)
     * are converted to their actual index.
     * Example:
     *   end=-1, s=42 -> end=42
     *   end=-2, s=42 -> end=41
     */
    void realize(ptrdiff_t s)
    {
        if (end < 0) end = s + end + 1;
    }

    /**
     * Number of elements selected by the slice.
     * Uses ceiling division so a trailing partial step still counts its
     * element, matching python's len(range(start, end, step)):
     * start=0, end=5, step=2 selects indices 0,2,4 -> 3 elements
     * (truncating division would wrongly report 2 and drop index 4).
     */
    ptrdiff_t numel() const
    {
        if (step > 0)
            return (end - start + step - 1) / step;
        // negative step: indices start, start+step, ... while > end
        return (start - end - step - 1) / (-step);
    }

    /** Maps a position within the slice to the index on the full axis. */
    ptrdiff_t at(ptrdiff_t i) const
    {
        return start + i * step;
    }
};
/**
* Raw volume in c-style order (last dimension is fastest)
*/
template<typename T, bool Fortran>
class RawVolume
{
private:
std::vector<T> memory_;
// size of each dimension
std::vector<size_t> dimensions_;
// per-dimension element stride (in elements, not bytes)
std::vector<size_t> strides_;
enum {
IsFortran = Fortran, //Fortran-order
IsC = !Fortran //C-order
};
public:
RawVolume(RawVolume const&) = delete;
RawVolume(RawVolume&&) = delete;
RawVolume& operator=(RawVolume const&) = delete;
RawVolume& operator=(RawVolume&&) = delete;
// Product of all dimensions, i.e. total element count (0 for no dims).
static size_t prod(const std::vector<size_t>& d)
{
if (d.empty()) return 0;
size_t v = d[0];
for (size_t i = 1; i < d.size(); ++i)
v *= d[i];
return v;
}
// Strides for C order: last dimension fastest (stride 1).
static std::vector<size_t> computeStrides(const std::vector<size_t>& d, std::bool_constant<false> cOrder)
{
//c-style
std::vector<size_t> strides(d.size(), 1);
for (int i=d.size()-2; i>=0; --i)
{
strides[i] = strides[i + 1] * d[i + 1];
}
return strides;
}
// Strides for Fortran order: first dimension fastest (stride 1).
static std::vector<size_t> computeStrides(const std::vector<size_t>& d, std::bool_constant<true> fortranOrder)
{
//fortran-style
std::vector<size_t> strides(d.size(), 1);
for (int i = 1; i < d.size(); ++i)
{
strides[i] = strides[i - 1] * d[i - 1];
}
return strides;
}
// Allocates a volume; strides follow the template order parameter.
RawVolume(const std::vector<size_t>& dimensions)
: memory_(prod(dimensions)), dimensions_(dimensions)
, strides_(computeStrides(dimensions, std::bool_constant<Fortran>()))
{}
// Allocates a volume with caller-supplied strides (consistency with
// `dimensions` is not validated here).
RawVolume(const std::vector<size_t>& dimensions, const std::vector<size_t>& strides)
: memory_(prod(dimensions)), dimensions_(dimensions)
, strides_(strides)
{}
// Adopts an existing buffer; its length must equal prod(dimensions).
RawVolume(std::vector<T>&& memory, const std::vector<size_t>& dimensions)
: memory_(memory), dimensions_(dimensions)
, strides_(computeStrides(dimensions, std::bool_constant<Fortran>()))
{
assert(memory_.size() == prod(dimensions));
}
[[nodiscard]] constexpr bool isCOrder() const { return IsC; }
[[nodiscard]] constexpr bool isFortranOrder() const { return IsFortran; }
[[nodiscard]] size_t numel() const { return memory_.size(); }
[[nodiscard]] const std::vector<size_t>& dimensions() const
{
return dimensions_;
}
[[nodiscard]] const std::vector<size_t>& strides() const
{
return strides_;
}
[[nodiscard]] T* data() { return memory_.data(); }
[[nodiscard]] const T* data() const { return memory_.data(); }
// Element-wise static_cast into a new volume with identical layout.
template<typename TOut>
[[nodiscard]] std::shared_ptr<RawVolume<TOut, Fortran>> cast() const
{
auto o = std::make_shared<RawVolume<TOut, Fortran>>(dimensions(), strides());
compression::cast<TOut, T>(o->data(), data(), numel());
return o;
}
private:
// Recursively copies the sliced region, one dimension per recursion
// level; dim == rank is the scalar base case that copies one element.
template<bool F>
void fillDimension(std::shared_ptr<RawVolume<T, F>> out, const std::vector<Slice>& slices, int dim, ptrdiff_t offsetIn, ptrdiff_t offsetOut) const
{
if (dim==dimensions_.size())
{
//recursion end
out->data()[offsetOut] = memory_[offsetIn];
return;
}
const Slice& slice = slices[dim];
ptrdiff_t strideIn = strides_[dim];
ptrdiff_t strideOut = out->strides()[dim];
auto numel = slice.numel();
for (size_t iOut=0; iOut<numel; ++iOut)
{
auto iIn = slice.at(iOut);
fillDimension(out, slices, dim + 1, offsetIn + strideIn * iIn, offsetOut + strideOut * iOut);
}
}
public:
// Copies the selected sub-volume into a new RawVolume. Missing trailing
// slices default to "everything"; the output order parameter may differ
// from the input order, which is how toCStyle/toFortranStyle convert.
template<bool FortranOut = Fortran>
[[nodiscard]] std::shared_ptr<RawVolume<T, FortranOut>> slice(const std::vector<Slice>& slices) const
{
//assemble slices
std::vector<Slice> slices2(dimensions_.size());
std::vector<size_t> newdim(dimensions_.size());
for (int i=0; i<dimensions_.size(); ++i)
{
Slice s = i < slices.size() ? slices[i] : Slice();
s.realize(dimensions_[i]);
slices2[i] = s;
assert(s.numel() > 0);
newdim[i] = s.numel();
}
//create and fill data
auto out = std::make_shared<RawVolume<T, FortranOut>>(newdim);
fillDimension(out, slices2, 0, 0, 0);
return out;
}
// Full copy reordered to C-style (last dimension fastest).
[[nodiscard]] std::shared_ptr<RawVolume<T, false>> toCStyle() const
{
return slice<false>({});
}
// Full copy reordered to Fortran-style (first dimension fastest).
[[nodiscard]] std::shared_ptr<RawVolume<T, true>> toFortranStyle() const
{
return slice<true>({});
}
};
template<typename T>
using RawVolumeCStyle_ptr = std::shared_ptr<RawVolume<T, false>>;
template<typename T>
using RawVolumeFortranStyle_ptr = std::shared_ptr<RawVolume<T, true>>;
/**
 * A flat, type-erased byte buffer holding a compressed volume.
 * Either owns its storage (malloc'd in the ctor, free'd in the dtor) or
 * wraps caller-owned memory without taking ownership. Non-copyable.
 */
class CompressedVolume
{
private:
    bool ownsMemory_;
    void* bytes_;
    size_t numBytes_;
public:
    CompressedVolume(CompressedVolume const&) = delete;
    CompressedVolume(CompressedVolume&&) = delete;
    CompressedVolume& operator=(CompressedVolume const&) = delete;
    CompressedVolume& operator=(CompressedVolume&&) = delete;

    /** Allocates an owned buffer of `size` bytes. */
    CompressedVolume(size_t size)
        : ownsMemory_(true)
        , bytes_(malloc(size))
        , numBytes_(size)
    {}

    /** Wraps externally owned memory; the caller keeps ownership. */
    CompressedVolume(void* memory, size_t size)
        : ownsMemory_(false)
        , bytes_(memory)
        , numBytes_(size)
    {}

    ~CompressedVolume()
    {
        if (ownsMemory_)
        {
            free(bytes_);
        }
    }

    /**
     * The size in bytes
     */
    [[nodiscard]] size_t size() const { return numBytes_; }

    /**
     * The raw pointer to the data
     */
    [[nodiscard]] const void* data() const { return bytes_; }

    /**
     * The raw pointer to the data (non-const)
     */
    [[nodiscard]] void* data() { return bytes_; }
};
typedef std::shared_ptr<CompressedVolume> CompressedVolume_ptr;
/**
 * The metric for computing the target compression rate
 */
enum TThreshTarget
{
EPS, //relative error
RMSE, // root-mean-square error (inferred from the name -- confirm)
PSNR // peak signal-to-noise ratio (inferred from the name -- confirm)
};
/**
* Knows keys, provided by all compression and decompression methods below
* - "time_ms" : execution time in milliseconds
* - "total_memory_cpu" : The total cumulative memory that is allocated on the CPU throughout the algorithm in bytes
* - "peak_memory_cpu" : The peak memory usage on the CPU in bytes. It holds peak_memory_cpu <= total_memory_cpu
* - "total_memory_gpu" : The total cumulative memory that is allocated on the GPU throughout the algorithm in bytes
* - "peak_memory_gpu" : The peak memory usage on the GPU in bytes. It holds peak_memory_gpu <= total_memory_gpu
*/
// Statistic name -> value map; the known keys are documented above.
typedef std::unordered_map<std::string, long long> Statistics_t;
// TThresh compression of a Fortran-order double volume to the requested
// target metric/value, and its inverse.
std::tuple<CompressedVolume_ptr, Statistics_t> compressTThresh(RawVolumeFortranStyle_ptr<double> volume, TThreshTarget target, double targetValue, bool verbose);
std::tuple<RawVolumeFortranStyle_ptr<double>, Statistics_t> decompressTThresh(CompressedVolume_ptr v, bool verbose);
// Chunked TThresh variants: each chunk is passed as its own volume.
std::tuple<CompressedVolume_ptr, Statistics_t> compressTThreshChunked(const std::vector<RawVolumeFortranStyle_ptr<double>>& volume, TThreshTarget target, double targetValue, bool verbose);
std::tuple<std::vector<RawVolumeFortranStyle_ptr<double>>, Statistics_t> decompressTThreshChunked(CompressedVolume_ptr v, bool verbose);
// cudaCompress-based compression of a C-order float volume.
std::tuple<CompressedVolume_ptr, Statistics_t> compressCUDA(RawVolumeCStyle_ptr<float> volume, int numLevels, float quantizationStep, bool verbose, int numChunks=1);
std::tuple<RawVolumeCStyle_ptr<float>, Statistics_t> decompressCUDA(CompressedVolume_ptr v, bool verbose);
//Compression using manual chunking. Each chunk must have the same size
std::tuple<CompressedVolume_ptr, Statistics_t> compressCUDAChunked(const std::vector<RawVolumeCStyle_ptr<float>>& volumes, int numLevels, float quantizationStep, bool verbose);
std::tuple<std::vector<RawVolumeCStyle_ptr<float>>, Statistics_t> decompressCUDAChunked(CompressedVolume_ptr v, bool verbose);
/**
* Interactive decompressed of chunked cudaCompress.
* The original volume is compressed with \ref compressCUDAChunked
* and can be decompressed chunk-by-chunk here.
*/
class CudaCompressInteractiveDecompression
{
// pimpl keeps the decompression state out of this header.
struct impl;
std::unique_ptr<impl> pImpl;
public:
CudaCompressInteractiveDecompression(CompressedVolume_ptr v);
~CudaCompressInteractiveDecompression();
[[nodiscard]] int chunkWidth() const;
[[nodiscard]] int chunkHeight() const;
[[nodiscard]] int chunkDepth() const;
[[nodiscard]] int numChunks() const;
// Element type of the CUDA surface written by decompress().
enum class DataType
{
TypeUChar,
TypeUShort,
TypeFloat,
_TypeCount_
};
// Decompresses one chunk into `target` and returns per-call statistics.
Statistics_t decompress(int chunk, cudaSurfaceObject_t target, DataType targetDtype);
//The accumulated statistics over all decompression calls
Statistics_t globalStatistics();
// Non-copyable, non-movable (unique_ptr pimpl with out-of-line dtor).
CudaCompressInteractiveDecompression(const CudaCompressInteractiveDecompression& other) = delete;
CudaCompressInteractiveDecompression(CudaCompressInteractiveDecompression&& other) noexcept = delete;
CudaCompressInteractiveDecompression& operator=(const CudaCompressInteractiveDecompression& other) = delete;
CudaCompressInteractiveDecompression& operator=(CudaCompressInteractiveDecompression&& other) noexcept = delete;
};
typedef std::shared_ptr<CudaCompressInteractiveDecompression> CudaCompressInteractiveDecompression_ptr;
} |
DenseSegment.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_DENSESEGMENT_H_
#define SRC_DENSESEGMENT_H_
#include "GMDP/utils/edgelist.h"
#include "GMDP/utils/bitvector.h"
#include "GMDP/singlenode/unionreduce.h"
#include <string>
#include <vector>
#include <sstream>
#include <cstdio>
// Threshold used when deciding how to encode a segment for transfer.
// NOTE(review): semantics inferred from the name -- confirm at its definition.
inline double get_compression_threshold();
// How a segment's payload is encoded for exchange (see buffer<T> below).
enum compression_decision
{
NONE,
COMPRESSED,
SERIALIZED
};
// Small header describing an incoming buffer: its nnz and the size/partition
// count of the serialized stream. Presumably sent ahead of the payload so
// the receiver can pre-size its allocations -- confirm against the comm layer.
struct send_metadata
{
int nnz;
size_t serialized_nbytes;
size_t serialized_npartitions;
friend class boost::serialization::access;
// Boost.Serialization hook: archives all three fields.
template<class Archive>
void serialize(Archive & ar, const unsigned int version)
{
ar & nnz;
ar & serialized_nbytes;
ar & serialized_npartitions;
}
};
template <typename T>
class buffer
{
public:
bool uninitialized;
int nnz;
int capacity;
int num_ints;
size_t serialized_nbytes;
size_t serialized_npartitions;
T * value;
int * bit_vector;
T * compressed_data;
int * compressed_indices;
char * serialized_data;
size_t * serialized_partition_nbytes_scan;
size_t * serialized_partition_nnz_scan;
// Serialize
friend boost::serialization::access;
template<class Archive>
void save(Archive& ar, const unsigned int version) const {
ar & uninitialized;
ar & nnz;
ar & capacity;
ar & num_ints;
ar & serialized_nbytes;
ar & serialized_npartitions;
for(int i = 0 ; i < capacity; i++)
{
ar & value[i];
}
for(int i = 0 ; i < num_ints; i++)
{
ar & bit_vector[i];
}
for(int i = 0 ; i < capacity ; i++)
{
ar & compressed_data[i];
}
for(int i = 0 ; i < capacity ; i++)
{
ar & compressed_indices[i];
}
for(int i = 0 ; i < serialized_nbytes; i++)
{
ar & serialized_data[i];
}
for(int i = 0 ; i < serialized_npartitions + 1; i++)
{
ar & serialized_partition_nbytes_scan[i];
}
for(int i = 0 ; i < serialized_npartitions + 1; i++)
{
ar & serialized_partition_nnz_scan[i];
}
}
template<class Archive>
void load(Archive& ar, const unsigned int version) {
ar & uninitialized;
ar & nnz;
ar & capacity;
ar & num_ints;
ar & serialized_nbytes;
ar & serialized_npartitions;
delete [] value;
delete [] bit_vector;
delete [] compressed_data;
delete [] compressed_indices;
delete [] serialized_data;
delete [] serialized_partition_nbytes_scan;
delete [] serialized_partition_nnz_scan;
value = new T[capacity];
bit_vector = new int[num_ints];
compressed_data = new T[capacity];
compressed_indices = new int[capacity];
serialized_data = new char[serialized_nbytes];
serialized_partition_nbytes_scan = new size_t[serialized_npartitions+1];
serialized_partition_nnz_scan = new size_t[serialized_npartitions+1];
for(int i = 0 ; i < capacity; i++)
{
ar & value[i];
}
for(int i = 0 ; i < num_ints; i++)
{
ar & bit_vector[i];
}
for(int i = 0 ; i < capacity ; i++)
{
ar & compressed_data[i];
}
for(int i = 0 ; i < capacity ; i++)
{
ar & compressed_indices[i];
}
for(int i = 0 ; i < serialized_nbytes; i++)
{
ar & serialized_data[i];
}
for(int i = 0 ; i < serialized_npartitions + 1; i++)
{
ar & serialized_partition_nbytes_scan[i];
}
for(int i = 0 ; i < serialized_npartitions + 1; i++)
{
ar & serialized_partition_nnz_scan[i];
}
}
BOOST_SERIALIZATION_SPLIT_MEMBER()
buffer(int _capacity, int _num_ints)
{
capacity = _capacity;
num_ints = _num_ints;
value = new T[capacity];
bit_vector = new int[num_ints];
//compressed_data = reinterpret_cast<T*>(_mm_malloc(capacity * sizeof(T) + capacity*sizeof(int), 64));
compressed_data = new T[capacity];
compressed_indices = new int[capacity];
uninitialized = true;
serialized_data = new char[0];
serialized_nbytes = 0;
serialized_npartitions = omp_get_max_threads() * 16;
serialized_partition_nbytes_scan = new size_t[serialized_npartitions+1];
serialized_partition_nnz_scan = new size_t[serialized_npartitions+1];
}
buffer() : buffer(0,0) {}
void alloc_serialized(size_t sz)
{
delete [] serialized_data;
serialized_data = new char[sz];
serialized_nbytes = sz;
}
int compute_nnz() const
{
int len = 0;
#pragma omp parallel for reduction(+:len)
for (int ii = 0 ; ii < num_ints ; ii++) {
int p = _popcnt32(bit_vector[ii]);
len += p;
}
return len;
}
int compute_nnz(int start, int finish) const
{
int len = 0;
#pragma omp parallel for reduction(+:len)
for (int ii = start ; ii < finish ; ii++) {
int p = _popcnt32(bit_vector[ii]);
len += p;
}
return len;
}
template<bool EXTENDS_SERIALIZABLE = std::is_base_of<Serializable,T>::value,
typename std::enable_if<EXTENDS_SERIALIZABLE>::type* = nullptr>
void decompress()
{
memset(bit_vector, 0, num_ints* sizeof(int));
std::stringstream * sss = new std::stringstream[serialized_npartitions];
#pragma omp parallel for
for(int p = 0 ; p < serialized_npartitions ; p++)
{
int i_per_partition = (num_ints + serialized_npartitions - 1) / serialized_npartitions;
int start_i = i_per_partition * p;
int end_i = i_per_partition * (p+1);
if(end_i > num_ints) end_i = num_ints;
sss[p].write(serialized_data + serialized_partition_nbytes_scan[p],
(serialized_partition_nbytes_scan[p+1]-serialized_partition_nbytes_scan[p]));
boost::archive::binary_iarchive ia(sss[p]);
for(unsigned long int i = 0 ; i < (serialized_partition_nnz_scan[p+1] -
serialized_partition_nnz_scan[p]) ; i++)
{
int idx;
ia >> idx;
ia >> value[idx];
set_bitvector(idx, bit_vector);
}
}
delete [] sss;
}
template<bool EXTENDS_SERIALIZABLE = std::is_base_of<Serializable,T>::value,
typename std::enable_if<!EXTENDS_SERIALIZABLE>::type* = nullptr>
void decompress()
{
memset(bit_vector, 0, num_ints* sizeof(int));
//compressed_indices = reinterpret_cast<int*>(compressed_data + nnz);
int npartitions = omp_get_max_threads();
int * start_nnzs = new int[npartitions];
int * end_nnzs = new int[npartitions];
int mystart = 0;
int my_nz_per = (nnz + npartitions - 1) / npartitions;
my_nz_per = ((my_nz_per + 31) / 32) * 32;
for(int p = 0 ; p < npartitions ; p++)
{
start_nnzs[p] = mystart;
mystart += my_nz_per;
if(mystart > nnz) mystart = nnz;
if(mystart < nnz)
{
int start32 = compressed_indices[mystart] / 32;
while((mystart < nnz) && compressed_indices[mystart] / 32 == start32) mystart++;
}
end_nnzs[p] = mystart;
}
#pragma omp parallel for
for(int p = 0 ; p < npartitions ; p++)
{
int start_nnz = start_nnzs[p];
int end_nnz = end_nnzs[p];
for(int i = start_nnz ; i < end_nnz ; i++)
{
int idx = compressed_indices[i];
set_bitvector(idx, bit_vector);
value[idx] = compressed_data[i];
}
}
delete [] start_nnzs;
delete [] end_nnzs;
}
// Serialize this buffer's nonzeros with Boost (enabled when T derives from
// Serializable).  Each of the serialized_npartitions partitions archives its
// (index, value) pairs into a private stringstream in parallel; serial prefix
// scans then lay the per-partition byte/nnz ranges out back-to-back in
// serialized_data, recording the offsets in the *_scan arrays so a receiver
// can deserialize each partition independently.
template<bool EXTENDS_SERIALIZABLE = std::is_base_of<Serializable,T>::value,
typename std::enable_if<EXTENDS_SERIALIZABLE>::type* = nullptr>
void compress()
{
size_t * serialized_partition_nbytes = new size_t[serialized_npartitions];
size_t * serialized_partition_nnz = new size_t[serialized_npartitions];
std::stringstream * sss = new std::stringstream[serialized_npartitions];
#pragma omp parallel for
for(int p = 0 ; p < serialized_npartitions ; p++)
{
// Partition the bitmap WORDS (not elements) evenly across partitions.
int i_per_partition = (num_ints + serialized_npartitions - 1) / serialized_npartitions;
int start_i = i_per_partition * p;
int end_i = i_per_partition * (p+1);
if(end_i > num_ints) end_i = num_ints;
serialized_partition_nnz[p] = 0;
boost::archive::binary_oarchive oa(sss[p]);
for(int ii = start_i ; ii < end_i ; ii++)
{
// Skip bitmap words with no bits set (_popcnt32 is an Intel intrinsic).
if(_popcnt32(bit_vector[ii]) == 0) continue;
for(int i = ii*32 ; i < (ii+1)*32 ; i++)
{
if(get_bitvector(i, bit_vector))
{
// Archive the global element index followed by its value.
oa << i;
oa << value[i];
serialized_partition_nnz[p]++;
}
}
}
// Measure the stream size.  NOTE(review): measured while oa is still in
// scope -- assumes the binary archive has written everything through to
// the stream by this point; confirm against Boost.Serialization docs.
sss[p].seekg(0, sss[p].end);
size_t sz = sss[p].tellg();
sss[p].seekg(0, sss[p].beg);
serialized_partition_nbytes[p] = sz;
}
// Serial exclusive prefix scans give each partition its output offset.
serialized_partition_nnz_scan[0] = 0;
serialized_partition_nbytes_scan[0] = 0;
for(int p = 0 ; p < serialized_npartitions ; p++)
{
serialized_partition_nnz_scan[p+1] = serialized_partition_nnz_scan[p] + serialized_partition_nnz[p];
serialized_partition_nbytes_scan[p+1] = serialized_partition_nbytes_scan[p] + serialized_partition_nbytes[p];
}
size_t sz = serialized_partition_nbytes_scan[serialized_npartitions];
alloc_serialized(sz);
// Copy each partition's bytes to its final offset, in parallel.
#pragma omp parallel for
for(int p = 0 ; p < serialized_npartitions ; p++)
{
sss[p].read(serialized_data + serialized_partition_nbytes_scan[p], serialized_partition_nbytes[p]);
}
delete [] serialized_partition_nnz;
delete [] serialized_partition_nbytes;
delete [] sss;
}
// Compress the dense value/bitmap representation into parallel coordinate
// arrays (compressed_data, compressed_indices), sorted by index.  Used when
// T is NOT Serializable (plain-data path).  Two passes: count nonzeros per
// partition, prefix-scan the counts, then scatter each partition's entries
// starting at its scan offset.
// NOTE(review): assumes compressed_data/compressed_indices are already large
// enough to hold nnz entries -- confirm in the buffer constructor.
template<bool EXTENDS_SERIALIZABLE = std::is_base_of<Serializable,T>::value,
typename std::enable_if<!EXTENDS_SERIALIZABLE>::type* = nullptr>
void compress()
{
// Oversubscribe partitions so OpenMP can load-balance uneven density.
int npartitions = omp_get_max_threads() * 16;
int * partition_nnz = new int[npartitions];
int * partition_nnz_scan = new int[npartitions+1];
// Pass 1: per-partition nonzero counts over ranges of bitmap words.
#pragma omp parallel for
for(int p = 0 ; p < npartitions ; p++)
{
int i_per_partition = (num_ints + npartitions - 1) / npartitions;
int start_i = i_per_partition * p;
int end_i = i_per_partition * (p+1);
if(end_i > num_ints) end_i = num_ints;
partition_nnz[p] = compute_nnz(start_i, end_i);
}
// Serial exclusive prefix scan; also accumulates the total nnz member.
partition_nnz_scan[0] = 0;
nnz = 0;
for(int p = 0 ; p < npartitions ; p++)
{
partition_nnz_scan[p+1] = partition_nnz_scan[p] + partition_nnz[p];
nnz += partition_nnz[p];
}
// Pass 2: each partition writes its entries at its private offset, so the
// parallel writes never overlap.
#pragma omp parallel for
for(int p = 0 ; p < npartitions ; p++)
{
int i_per_partition = (num_ints + npartitions - 1) / npartitions;
int start_i = i_per_partition * p;
int end_i = i_per_partition * (p+1);
if(end_i > num_ints) end_i = num_ints;
int nzcnt = partition_nnz_scan[p];
for(int ii = start_i ; ii < end_i ; ii++)
{
// Skip empty bitmap words (_popcnt32 is an Intel intrinsic).
if(_popcnt32(bit_vector[ii]) == 0) continue;
for(int i = ii*32 ; i < (ii+1)*32 ; i++)
{
if(get_bitvector(i, bit_vector))
{
compressed_data[nzcnt] = value[i];
compressed_indices[nzcnt] = i;
nzcnt++;
}
}
}
}
delete [] partition_nnz;
delete [] partition_nnz_scan;
}
// Release every heap array owned by this buffer, in reverse of the order
// they appear above.  delete[] on a null pointer is a no-op, so members
// that were never allocated are safe -- assumes unallocated pointers are
// null-initialized in the constructor (TODO confirm).
~buffer()
{
delete [] serialized_data;
delete [] serialized_partition_nnz_scan;
delete [] serialized_partition_nbytes_scan;
delete [] compressed_indices;
delete [] compressed_data;
delete [] bit_vector;
delete [] value;
}
};
// DenseSegment<T>: one dense segment (contiguous index range) of a
// distributed vector.  Storage lives in a single buffer<T> ("properties"):
// a value array plus an occupancy bitmap packed 8*sizeof(int) bits per word
// (num_ints words).  Public set/unset/get take 1-based indices; internal
// storage is 0-based.  Buffers received from other MPI ranks are held in
// "received" and recycled through "uninitialized".
template <typename T>
class DenseSegment {
public:
// Segment name (serialized along with the segment).
std::string name;
// Number of logical elements this segment can hold.
int capacity;
// Bitmap length in words: ceil(capacity / (8 * sizeof(int))).
int num_ints;
// Primary storage; NULL until alloc() is first called.
buffer<T> *properties;
// Metadata from the most recent recv_nnz(), consumed by recv_segment().
send_metadata received_md;
// Metadata queued by recv_nnz_queue(), consumed by recv_segment_queue().
std::vector<send_metadata> queued_md;
// Buffers filled by recv_segment_queue(); merged by union_received().
std::vector<buffer<T> * > received;
// Spare buffers available for reuse (see set_uninitialized_received()).
std::vector<buffer<T> * > uninitialized;
// NOTE(review): Boost convention is "friend class boost::serialization::access;"
// -- confirm this form compiles as intended.
friend boost::serialization::access;
// Boost split-member save: a leading flag records whether "properties"
// exists so load() knows whether to deserialize it.
template<class Archive>
void save(Archive& ar, const unsigned int version) const {
bool properties_is_null = (properties == NULL);
ar & properties_is_null;
ar & name;
ar & capacity;
ar & num_ints;
if(properties != NULL)
{
ar & properties;
}
ar & received_md;
ar & queued_md;
ar & received;
ar & uninitialized;
}
// Boost split-member load: mirror of save(); leaves properties NULL when
// the archive was written from a segment without storage.
template<class Archive>
void load(Archive& ar, const unsigned int version) {
bool properties_null;
ar & properties_null;
ar & name;
ar & capacity;
ar & num_ints;
if(!properties_null)
{
ar & properties;
}
else
{
properties = NULL;
}
ar & received_md;
ar & queued_md;
ar & received;
ar & uninitialized;
}
BOOST_SERIALIZATION_SPLIT_MEMBER()
// n: number of elements.  Storage allocation is deferred to alloc().
DenseSegment(int n) {
capacity = n;
num_ints = (n + sizeof(int) * 8 - 1) / (sizeof(int) * 8);
properties = NULL;
}
DenseSegment() : DenseSegment(0) {}
// Bulk-load edges into this segment.  edges[i].src is 1-based and offset by
// row_start; _m is unused here.  Assumes source indices are distinct, since
// nnz is set to _nnz without de-duplication -- TODO confirm at call sites.
void ingestEdges(edge_t<T>* edges, int _m, int _nnz, int row_start)
{
alloc();
initialize();
for (uint64_t i = 0; i < (uint64_t)_nnz; i++) {
int src = edges[i].src - row_start - 1;
set_bitvector(src, properties->bit_vector);
properties->value[src] = edges[i].val;
}
properties->nnz = _nnz;
properties->uninitialized = false;
}
// Frees owned storage and every buffer in the receive/recycle pools.
~DenseSegment()
{
if(properties != NULL)
{
delete properties;
}
for(auto it = received.begin() ; it != received.end() ; it++)
{
delete *it;
}
received.clear();
for(auto it = uninitialized.begin() ; it != uninitialized.end() ; it++)
{
delete *it;
}
uninitialized.clear();
}
// Exact nonzero count; 0 when storage is absent or logically cleared.
int compute_nnz() const
{
if(properties == NULL) return 0;
if(properties->uninitialized) return 0;
return properties->compute_nnz();
}
// Nonzero count restricted to bitmap words [start, finish).
int compute_nnz(int start, int finish) const
{
if(properties == NULL) return 0;
if(properties->uninitialized) return 0;
return properties->compute_nnz(start, finish);
}
// Pick the wire/storage format: Serializable types always use the
// boost-serialized path; otherwise sparse (COMPRESSED) unless the segment
// is dense enough that sending the raw arrays (NONE) is cheaper.
compression_decision should_compress(int test_nnz)
{
if(std::is_base_of<Serializable,T>::value) return SERIALIZED;
if(test_nnz > get_compression_threshold() * capacity)
return NONE;
return COMPRESSED;
}
// Build the compressed/serialized image when the density warrants it.
void compress()
{
alloc();
initialize();
if(should_compress(properties->nnz) == COMPRESSED ||
should_compress(properties->nnz) == SERIALIZED)
{
properties->compress();
}
}
// Inverse of compress(); requires storage to exist.
void decompress()
{
assert(properties);
if(should_compress(properties->nnz) == COMPRESSED ||
should_compress(properties->nnz) == SERIALIZED)
{
properties->decompress();
}
}
// Move every received buffer into the recycle pool (contents discarded).
void set_uninitialized_received()
{
for(auto it = received.begin() ; it != received.end() ; it++)
{
(*it)->uninitialized = true;
uninitialized.push_back(*it);
}
received.clear();
}
// Logically clear the whole segment without freeing any memory.
void set_uninitialized() {
set_uninitialized_received();
if(properties != NULL)
{
properties->uninitialized = true;
properties->nnz = 0;
}
}
// Lazily create the backing buffer.
void alloc() {
if(properties == NULL)
{
properties = new buffer<T>(capacity, num_ints);
}
}
// Zero the bitmap on first use after a logical clear.
void initialize()
{
if(properties->uninitialized)
{
memset(properties->bit_vector, 0, num_ints* sizeof(int));
properties->nnz = 0;
}
properties->uninitialized = false;
}
// Current nonzero count.  NOTE(review): dereferences properties without a
// NULL check -- caller must have called alloc() first.
int getNNZ()
{
return properties->nnz;
}
// Insert or overwrite element idx (1-based).
void set(int idx, T val) {
alloc();
initialize();
// Only bump nnz when the slot was previously empty.
if(!get_bitvector(idx-1, properties->bit_vector)) properties->nnz++;
properties->value[idx - 1] = val;
set_bitvector(idx-1, properties->bit_vector);
properties->uninitialized = false;
}
// Remove element idx (1-based); the stored value is left in place.
void unset(int idx) {
alloc();
initialize();
if(get_bitvector(idx-1, properties->bit_vector)) properties->nnz--;
clear_bitvector(idx-1, properties->bit_vector);
properties->uninitialized = false;
}
// Make the segment fully dense with every element equal to val.
// All full bitmap words are set to all-ones in parallel; the final
// (possibly partial) word is zeroed first and then filled bit-by-bit for
// the last <=32 valid indices, so no bit past "capacity" is ever set.
void setAll(T val) {
alloc();
//initialize();
properties->uninitialized=false;
if(num_ints == 0) return;
properties->bit_vector[num_ints-1] = 0;
#pragma omp parallel for
for(int i = 0 ; i < num_ints-1 ; i++)
{
properties->bit_vector[i] = 0xFFFFFFFF;
}
for(int idx = std::max(0, capacity-32) ; idx < capacity ; idx++)
{
set_bitvector(idx, properties->bit_vector);
}
properties->nnz = capacity;
#pragma omp parallel for
for(int i = 0 ; i < capacity ; i++)
{
properties->value[i] = val;
}
}
// Read element idx (1-based).  Caller must ensure the element is present.
T get(const int idx) const {
assert(properties);
assert(!properties->uninitialized);
return properties->value[idx - 1];
}
// Ship this segment's size metadata to dst_rank.  NOTE(review): this is a
// blocking MPI_Send; the requests vector is unused here.
void send_nnz(int myrank, int dst_rank, std::vector<MPI_Request>* requests) {
send_metadata md = {properties->nnz, properties->serialized_nbytes, properties->serialized_npartitions};
MPI_Send(&md, sizeof(md), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD);
}
// Receive metadata and queue it for a later recv_segment_queue().
void recv_nnz_queue(int myrank, int src_rank,
std::vector<MPI_Request>* requests) {
send_metadata md;
MPI_Recv(&md, sizeof(md), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
// Front-insert so queued_md.back() pops entries in arrival order.
queued_md.insert(queued_md.begin(), md);
}
// Receive metadata directly into received_md for recv_segment().
void recv_nnz(int myrank, int src_rank,
std::vector<MPI_Request>* requests) {
alloc();
MPI_Recv(&received_md, sizeof(send_metadata), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
}
// Post the nonblocking sends for this segment's payload in the format
// chosen by should_compress(); recv_buffer() on the peer posts matching
// receives in the same order, on the same tag.
void send_segment(int myrank, int dst_rank, std::vector<MPI_Request>* requests) {
if(should_compress(properties->nnz) == COMPRESSED)
{
MPI_Request r1;
MPI_Request r2;
MPI_Isend(properties->compressed_data, properties->nnz * sizeof(T), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD,
&r1);
MPI_Isend(properties->compressed_indices, properties->nnz * sizeof(int), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD,
&r2);
requests->push_back(r1);
requests->push_back(r2);
}
else if(should_compress(properties->nnz) == SERIALIZED)
{
MPI_Request r1;
MPI_Request r2;
MPI_Request r3;
MPI_Isend(properties->serialized_data, properties->serialized_nbytes, MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r1);
MPI_Isend(properties->serialized_partition_nnz_scan, (properties->serialized_npartitions+1) * sizeof(size_t), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r2);
MPI_Isend(properties->serialized_partition_nbytes_scan, (properties->serialized_npartitions+1) * sizeof(size_t), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r3);
requests->push_back(r1);
requests->push_back(r2);
requests->push_back(r3);
}
else
{
// Dense path: raw value array plus occupancy bitmap.
MPI_Request r1;
MPI_Request r2;
MPI_Isend(properties->value, capacity * sizeof(T), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD,
&r1);
MPI_Isend(properties->bit_vector, num_ints * sizeof(int), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD,
&r2);
requests->push_back(r1);
requests->push_back(r2);
}
}
// Post the nonblocking receives matching send_segment(), into buffer p,
// using md (previously received) to size the transfers.
// NOTE(review): the COMPRESSED branch assumes p->compressed_* are already
// large enough for md.nnz entries -- confirm in the buffer constructor.
void recv_buffer(send_metadata md,
buffer<T> * p,
int myrank, int src_rank,
std::vector<MPI_Request>* requests) {
p->nnz = md.nnz;
p->serialized_nbytes = md.serialized_nbytes;
p->serialized_npartitions = md.serialized_npartitions;
if(should_compress(p->nnz) == COMPRESSED)
{
MPI_Request r1;
MPI_Request r2;
MPI_Irecv(p->compressed_data, p->nnz * sizeof(T), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
&r1);
MPI_Irecv(p->compressed_indices, p->nnz * sizeof(int), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
&r2);
requests->push_back(r1);
requests->push_back(r2);
}
else if(should_compress(p->nnz) == SERIALIZED)
{
MPI_Request r1;
MPI_Request r2;
MPI_Request r3;
p->alloc_serialized(p->serialized_nbytes);
MPI_Irecv(p->serialized_data, p->serialized_nbytes, MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r1);
MPI_Irecv(p->serialized_partition_nnz_scan, (p->serialized_npartitions+1) * sizeof(size_t), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r2);
MPI_Irecv(p->serialized_partition_nbytes_scan, (p->serialized_npartitions+1) * sizeof(size_t), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r3);
requests->push_back(r1);
requests->push_back(r2);
requests->push_back(r3);
}
else
{
MPI_Request r1;
MPI_Request r2;
MPI_Irecv(p->value, capacity * sizeof(T), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
&r1);
MPI_Irecv(p->bit_vector, num_ints* sizeof(int), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
&r2);
requests->push_back(r1);
requests->push_back(r2);
}
p->uninitialized = false;
}
// Pop a recycled buffer (or allocate a fresh one) and start receiving the
// next queued segment into it; the buffer lands in "received".
void recv_segment_queue(int myrank, int src_rank,
std::vector<MPI_Request>* requests) {
buffer<T> * new_properties;
if(uninitialized.size() > 0)
{
new_properties = uninitialized.back();
uninitialized.pop_back();
}
else
{
new_properties = new buffer<T>(capacity, num_ints);
}
send_metadata md = queued_md.back();
queued_md.pop_back();
recv_buffer(md, new_properties, myrank, src_rank, requests);
received.push_back(new_properties);
}
// Receive directly into the primary buffer, using the metadata captured by
// the most recent recv_nnz().
void recv_segment(int myrank, int src_rank,
std::vector<MPI_Request>* requests) {
recv_buffer(received_md, properties, myrank, src_rank, requests);
}
// Write the segment to a text file as "<index> <value>" lines, indices
// offset by start_id.  NOTE(review): dereferences properties without a
// NULL check -- segment must be allocated and initialized first.
void save(std::string fname, int start_id, int _m, bool includeHeader)
{
int nnz = compute_nnz();
std::ofstream fout;
fout.open(fname);
if(includeHeader)
{
fout << _m << " " << nnz << std::endl;
}
for(int i = 0 ; i < capacity ; i++)
{
if(get_bitvector(i, properties->bit_vector))
{
fout << i + start_id << " " << properties->value[i] << std::endl;
}
}
fout.close();
}
// Export the segment as 1-based edges starting at edges[0]; dst is fixed
// to 1 (presumably because the segment models a column vector -- confirm).
// Caller must size "edges" for compute_nnz() entries.
void get_edges(edge_t<T> * edges, unsigned int start_nz) const
{
unsigned int mycnt = 0;
for(int i = 0 ; i < capacity ; i++)
{
if(get_bitvector(i, properties->bit_vector))
{
edges[mycnt].src = start_nz + i + 1;
edges[mycnt].dst = 1;
edges[mycnt].val = properties->value[i];
mycnt++;
}
}
}
// Merge every received buffer into "properties" with the user combine op
// op_fp(a, b, out, state).  Serialized buffers are expanded to dense form
// first; compressed buffers are merged directly from coordinate form.
template <typename Ta, typename Tb, typename Tc>
void union_received(void (*op_fp)(const Ta&, const Tb&, Tc*, void*), void* vsp) {
alloc();
initialize();
for(auto it = received.begin() ; it != received.end() ; it++)
{
if(should_compress((*it)->nnz) == COMPRESSED)
{
union_compressed((*it)->compressed_data, (*it)->compressed_indices, (*it)->nnz, capacity, num_ints, properties->value, properties->bit_vector, op_fp, vsp);
}
else if(should_compress((*it)->nnz) == SERIALIZED)
{
(*it)->decompress();
//union_dense((*it)->value, (*it)->bit_vector, capacity, num_ints, properties->value, properties->bit_vector, properties->value, properties->bit_vector, op_fp, vsp);
union_dense(properties->value, properties->bit_vector, capacity, num_ints, (*it)->value, (*it)->bit_vector, properties->value, properties->bit_vector, op_fp, vsp);
}
else
{
//union_dense((*it)->value, (*it)->bit_vector, capacity, num_ints, properties->value, properties->bit_vector, properties->value, properties->bit_vector, op_fp, vsp);
union_dense(properties->value, properties->bit_vector, capacity, num_ints, (*it)->value, (*it)->bit_vector, properties->value, properties->bit_vector, op_fp, vsp);
}
}
}
};
#endif // SRC_DENSESEGMENT_H_
|
GB_unaryop__ainv_uint16_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint16_bool
// op(A') function: GB_tran__ainv_uint16_bool
// C type: uint16_t
// A type: bool
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = -aij
// A (input) matrix entry type
#define GB_ATYPE \
bool
// C (output) matrix entry type
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
// AINV (additive inverse) on an unsigned result: the uint16_t operand
// promotes to int, is negated, and wraps on assignment, so -1 becomes 65535.
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
// Composition used by the loops below: fetch aij, cast bool -> uint16_t,
// then apply the operator into Cx [pC].
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply worker: Cx [p] = -((uint16_t) Ax [p]) for all p in [0, anz).
// Cx and Ax are dense arrays of length anz; the loop is parallelized with a
// static schedule over nthreads threads.  Auto-generated -- do not edit.
GrB_Info GB_unop__ainv_uint16_bool
(
uint16_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this kernel was compiled out; the caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose worker: C = op (cast (A')).  The actual loop body lives in the
// shared template GB_unaryop_transpose.c, which is textually included here
// and instantiated with the GB_* macros defined above.
GrB_Info GB_tran__ainv_uint16_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
veccopy-ompt-target-tracing.c | #include <stdio.h>
#include <assert.h>
#include <omp.h>
#include "callbacks.h"
// Calls to start/stop/flush_trace to be injected by the tool
/*
 * Copy b[] into a[] twice on the target device, tracing each kernel with the
 * injected OMPT start/flush/stop_trace calls, then verify the copies on the
 * host.  Returns the number of mismatched elements (0 on success).
 */
int main()
{
  int N = 100000;
  int a[N];
  int b[N];
  int i;

  for (i = 0; i < N; i++)
    a[i] = 0;
  for (i = 0; i < N; i++)
    b[i] = i;

  start_trace();
  /* A loop-associated OpenMP construct ("... parallel for") must be followed
     immediately by a for statement; the original brace-wrapped compound
     statement was ill-formed, so the braces are removed. */
#pragma omp target parallel for
  for (int j = 0; j < N; j++)
    a[j] = b[j];
  flush_trace();
  stop_trace();

  start_trace();
#pragma omp target teams distribute parallel for
  for (int j = 0; j < N; j++)
    a[j] = b[j];
  stop_trace();

  /* Host-side verification that the device copies happened. */
  int rc = 0;
  for (i = 0; i < N; i++)
    if (a[i] != b[i]) {
      rc++;
      printf ("Wrong value: a[%d]=%d\n", i, a[i]);
    }
  if (!rc)
    printf("Success\n");
  return rc;
}
|
Wparentheses-3.c | /* PR c/70436 */
/* { dg-additional-options "-Wparentheses -fno-openmp" } */
int a, b, c;
void bar (void);
void baz (void);
/* Exercise -Wparentheses "ambiguous else" detection around OpenMP pragmas
   (compiled with -fno-openmp, so the pragmas are attached but inactive).
   Each dg-warning marks an if whose else binding is visually ambiguous;
   the brace-wrapped variants at the end must NOT warn.  Do not re-indent:
   the layout is the test.  */
void
f1 (void)
{
int i, j;
if (a) /* { dg-warning "ambiguous" } */
#pragma omp for
for (i = 0; i < 10; i++)
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
while (1)
#pragma omp for
for (i = 0; i < 10; i++)
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
for (i = 0; i < 10; i++)
#pragma omp for
for (j = 0; j < 10; j++)
if (b)
bar ();
else
baz ();
if (a)
#pragma omp for
for (i = 0; i < 10; i++)
if (b) /* { dg-warning "ambiguous" } */
#pragma omp parallel for
for (j = 0; j < 10; j++)
if (c)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp taskloop
for (i = 0; i < 10; i++)
if (b)
#pragma omp parallel for
for (j = 0; j < 10; j++)
if (c)
bar ();
else
baz ();
else
bar ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp taskloop simd
for (i = 0; i < 10; i++)
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp for collapse(2)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp critical
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
for (i = 0; i < 10; i++)
#pragma omp simd
for (j = 0; j < 10; j++)
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp for simd schedule(runtime)
for (i = 0; i < 10; i++)
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp master
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp parallel
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
for (i = 0; i < 10; i++)
#pragma omp parallel for
for (j = 0; j < 10; j++)
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
for (i = 0; i < 10; i++)
#pragma omp parallel for simd
for (j = 0; j < 10; j++)
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp single
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp task
if (b)
bar ();
else
baz ();
if (a) /* { dg-warning "ambiguous" } */
#pragma omp taskgroup
if (b)
bar ();
else
baz ();
/* From here on the inner statements are braced, so no warnings expected.  */
if (a)
#pragma omp for
for (i = 0; i < 10; i++)
{
if (b)
bar ();
else
baz ();
}
if (a)
{
#pragma omp taskloop
for (i = 0; i < 10; ++i)
if (b)
bar ();
}
else baz ();
if (a)
#pragma omp for collapse(2)
for (i = 0; i < 10; i++)
{
for (j = 0; j < 10; j++)
if (b)
bar ();
else
baz ();
}
if (a)
#pragma omp critical
{
if (b)
bar ();
else
baz ();
}
if (a)
for (i = 0; i < 10; i++)
#pragma omp simd
for (j = 0; j < 10; j++)
{
if (b)
bar ();
}
else
baz ();
if (a)
#pragma omp for simd schedule(dynamic, 5)
for (i = 0; i < 10; i++)
{
if (b)
bar ();
else
baz ();
}
if (a)
#pragma omp master
{
if (b)
bar ();
else
baz ();
}
if (a)
#pragma omp parallel
{
if (b)
bar ();
else
baz ();
}
if (a)
{
#pragma omp parallel
if (b)
bar ();
else
baz ();
}
if (a)
for (i = 0; i < 10; i++)
#pragma omp parallel for
for (j = 0; j < 10; j++)
{
if (b)
bar ();
}
else
baz ();
if (a)
for (i = 0; i < 10; i++)
#pragma omp parallel for simd
for (j = 0; j < 10; j++)
{
if (b)
bar ();
}
else
baz ();
if (a)
#pragma omp single
{
if (b)
bar ();
}
else
baz ();
if (a)
#pragma omp task
{
if (b)
bar ();
}
else
baz ();
if (a)
#pragma omp taskgroup
{
if (b)
bar ();
else
baz ();
}
if (a)
#pragma omp taskloop simd
for (i = 0; i < 10; i++)
{
if (b)
bar ();
else
baz ();
}
}
/* Same ambiguity checks as f1 but for the "ordered" construct; the braced
   variants at the end must not warn.  Layout is the test -- do not re-indent.  */
void
f2 (int d, int e, int f)
{
if (a) /* { dg-warning "ambiguous" } */
#pragma omp ordered
if (b)
bar ();
else
baz ();
if (d) /* { dg-warning "ambiguous" } */
#pragma omp ordered threads
if (b)
bar ();
else
baz ();
if (e)
#pragma omp ordered
{
if (b)
bar ();
else
baz ();
}
if (f)
#pragma omp ordered threads
{
if (b)
bar ();
else
baz ();
}
}
|
dispatch.c | /*
* Copyright (c) 2013 Mark Heily <mark@heily.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <pthread.h>
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
/*
* EXPERIMENTAL dispatching API
*/
/*
 * Dispatch loop: block in kq_event() for up to maxevents kevents, then fan
 * the callbacks out across an OpenMP team.  Every thread walks the full
 * event list; the "single nowait" construct ensures each event's callback
 * runs on exactly one thread.  Never returns; aborts on kq_event() failure.
 *
 * Fix: the loop index must be private to each thread.  The original used a
 * function-scope "i" shared by the whole parallel region -- a data race that
 * could skip or double-dispatch events.
 */
void
kq_dispatch(kqueue_t kq, void (*cb)(kqueue_t, struct kevent))
{
    const int maxevents = 64; /* Should be more like 2xNCPU */
    struct kevent events[maxevents];
    ssize_t nevents;

    for (;;) {
        nevents = kq_event(kq, NULL, 0, (struct kevent *) &events, maxevents, NULL);
        if (nevents < 0)
            abort();
#pragma omp parallel
        {
            int i; /* thread-private loop index */
            for (i = 0; i < nevents; i++) {
#pragma omp single nowait
                (*cb)(kq, events[i]);
            }
        }
    }
}
|
GB_unaryop__minv_int64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int64_fp64
// op(A') function: GB_tran__minv_int64_fp64
// C type: int64_t
// A type: double
// cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64)
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
// A (input) matrix entry type
#define GB_ATYPE \
double
// C (output) matrix entry type
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
// MINV: presumably the GraphBLAS integer multiplicative inverse for signed
// 64-bit values; GB_IMINV_SIGNED is defined in GB.h -- see there for the
// exact semantics (including the x == 0 case).
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
// GB_CAST_SIGNED (from GB.h) converts double -> int64_t; presumably it
// handles NaN/out-of-range values -- see its definition.
#define GB_CASTING(z, x) \
int64_t z ; GB_CAST_SIGNED(z,x,64) ;
// cij = op (cast (aij))
// Composition used by the loops below: fetch aij, cast double -> int64_t,
// then apply the operator into Cx [pC].
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply worker: Cx [p] = GB_IMINV_SIGNED ((int64_t) Ax [p], 64) for all p.
// Cx and Ax are dense arrays of length anz; the loop is parallelized with a
// static schedule over nthreads threads.  Auto-generated -- do not edit.
GrB_Info GB_unop__minv_int64_fp64
(
int64_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this kernel was compiled out; the caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose worker: C = op (cast (A')).  The actual loop body lives in the
// shared template GB_unaryop_transpose.c, which is textually included here
// and instantiated with the GB_* macros defined above.
GrB_Info GB_tran__minv_int64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
partial.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] -= hypre_MPI_Wtime();
#endif
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
/*HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;*/
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn;
/* Variables to keep count of interpolatory points */
/*HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter, coarse_counter_offd; */
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
/*HYPRE_Int strong_f_marker = -2;*/
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i;
/*HYPRE_Int i, ii, i1, i2, j, jj, kk, k1, jj1;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Int max_num_threads;
HYPRE_Int *P_diag_array = NULL;
HYPRE_Int *P_offd_array = NULL;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
max_num_threads = hypre_NumThreads();
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
/*P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); */
}
if (full_off_procNodes)
{
/*P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);*/
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/*hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);*/
for (i=0; i < full_off_procNodes; i++)
{
fine_to_coarse_offd[i] = -1;
tmp_CF_marker_offd[i] = -1;
}
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
P_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST);
P_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, diagonal, distribute, sgn, sum)
#endif
{
HYPRE_Int ii, jj_counter, jj_counter_offd, jj, kk, i1, i2, k1, jj1;
HYPRE_BigInt big_k1;
HYPRE_Int loc_col, jj_begin_row, jj_begin_row_offd;
HYPRE_Int jj_end_row, jj_end_row_offd, strong_f_marker;
HYPRE_Int size, rest, ne, ns;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
strong_f_marker = -2;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
size = n_coarse_old/num_threads;
rest = n_coarse_old - size*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(size+1);
ne = (my_thread_num+1)*(size+1);
}
else
{
ns = my_thread_num*size+rest;
ne = (my_thread_num+1)*size+rest;
}
if (n_fine) P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (ii=0; ii < n_fine; ii++)
P_marker[ii] = -1;
if (full_off_procNodes) P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
for (ii=0; ii < full_off_procNodes; ii++)
P_marker_offd[ii] = -1;
/*coarse_counter = 0;
coarse_counter_offd = 0;*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
for (ii = ns; ii < ne; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
/*P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;*/
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
/*coarse_counter++;*/
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F point, loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
P_diag_array[my_thread_num] = jj_counter;
P_offd_array[my_thread_num] = jj_counter_offd;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
for (i=0; i < max_num_threads; i++)
{
P_diag_array[i+1] += P_diag_array[i];
P_offd_array[i+1] += P_offd_array[i];
}
P_diag_size = P_diag_array[max_num_threads];
P_offd_size = P_offd_array[max_num_threads];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = P_diag_size;
P_offd_i[n_coarse_old] = P_offd_size;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (my_thread_num)
{
jj_counter = P_diag_array[my_thread_num-1];
jj_counter_offd = P_offd_array[my_thread_num-1];
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = ns; ii < ne; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
P_diag_i[ii] = jj_counter;
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row || loc_col == i)
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if(loc_col == i)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
} /* end parallel region */
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_array, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialStdInterp
* Comment: The interpolation weighting can be changed with the sep_weight
*          variable. Setting it allows negative and positive off-diagonal
*          entries to be kept together (not separated) in the weight formula.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int sep_weight,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
HYPRE_Real *ahat = NULL;
HYPRE_Real *ahat_offd = NULL;
HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
HYPRE_Real diagonal, distribute;
HYPRE_Real alfa, beta;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, j1, jj, kk, k1;
HYPRE_BigInt big_k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Real wall_1 = 0;
HYPRE_Real wall_2 = 0;
HYPRE_Real wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 0))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F point, loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
/* Initialize ahat, which is a modification to a, used in the standard
* interpolation routine. */
if (n_fine)
{
ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
if (debug_flag==4) wall_time = time_getWallclockSeconds();
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd]=i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] > 0)
{
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] > 0)
{
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
cnt_c = 0;
cnt_f = jj_end_row-jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd-jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
ahat[indx] += A_diag_data[jj];
else if (P_marker[i1] >= jj_begin_row)
{
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
ahat[indx] -= A_diag_data[kk]*distribute;
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk]*distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk]*distribute;
}
}
if(num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
ahat_offd[indx] -= A_offd_data[kk]*distribute;
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute;
}
}
}
}
}
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
ahat_offd[indx] += A_offd_data[jj];
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++)
{
big_k1 = A_ext_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /*diag*/
loc_col = (HYPRE_Int)(big_k1 - col_1);
indx = ihat[loc_col];
if (indx > -1)
ahat[indx] -= A_ext_data[kk]*distribute;
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk]*distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk]*distribute;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(num_functions == 1 ||
dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
ahat_offd[indx] -= A_ext_data[kk]*distribute;
else if(P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if(sep_weight == 1)
{
for (jj=0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
if (sum_neg_C*diagonal != 0.0) alfa = sum_neg/sum_neg_C/diagonal;
if (sum_pos_C*diagonal != 0.0) beta = sum_pos/sum_pos_C/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
P_diag_data[jj] = -beta*ahat[j1];
else
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
P_offd_data[jj] = -beta*ahat_offd[j1];
else
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
else
{
for (jj=0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C*diagonal != 0.0) alfa = sum/sum_C/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag==4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(ahat, HYPRE_MEMORY_HOST);
hypre_TFree(ihat, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt, HYPRE_MEMORY_HOST);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialExtInterp
* Comment: Builds a "partial" extended (distance-two) interpolation operator
* P for BoomerAMG. Rows of P are built only for points that were coarse on
* the previous level (CF_marker[i] == 1 -> identity row; CF_marker[i] == -2
* -> distance-two interpolation from strongly influencing C-points, with
* weak/strong F-point contributions folded into the diagonal). Off-processor
* rows of A and S are gathered via hypre_exchange_interp_data. The caller
* owns *P_ptr on return.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
/* [col_1, col_n) is this rank's global column ownership range; used to
* classify global column indices as diag (owned) vs offd (remote). */
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
/* P_marker maps a fine point to its column slot in the current P row;
* values below jj_begin_row mean "not yet in this row". */
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
/* strong_f_marker tags strong F-neighbors of the current row in P_marker;
* decremented per row so stale tags from earlier rows never match. */
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
/* Only the last rank knows the global totals; it seeds the broadcasts
* below (the values are uninitialized on other ranks until Bcast). */
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
/* Gathers external rows of A (A_ext), external strength rows (Sop),
* CF/dof markers for distance-two off-processor neighbors, and an
* extended comm package. Nonzero return means failure: bail out. */
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
/* Build two maps: fine_to_coarse (new C-point numbering) and
* old_coarse_to_fine (rows of P correspond to previous-level C-points,
* i.e. CF_marker == 1 or == -2). */
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* Pass 1: count nonzeros per row of P (diag and offd) using P_marker
* "< P_diag_i[ii]" as a per-row visited test. */
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
/* Remote entries are encoded as negative: -(local offd col)-1 */
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
/* Pass 2: fill P_diag_j/P_diag_data and P_offd_j/P_offd_data, then
* normalize each row by its (accumulated) diagonal. */
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
/* Accumulate weights: walk row i of A. C-neighbors add directly;
* strong F-neighbors distribute their row over common C-points;
* weak neighbors are lumped into the diagonal. The first entry of
* each diag row of A is assumed to be the diagonal element. */
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
/* sgn tracks the sign of i1's diagonal so only connections of
* the opposite sign are distributed. */
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly incluence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
/* No common C-points of the right sign: lump into diagonal. */
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row )
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
/* NOTE(review): sgn here carries over from the last
* diag-loop F-point (it is not recomputed from i1's own
* diagonal), and the diag branch above applies no sign
* test at all — asymmetric; confirm against upstream
* hypre before changing. */
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/* Normalize row by -diagonal (skip if diagonal underflowed to 0;
* exact floating-point zero test is intentional here). */
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
/* Wrap the CSR arrays into a ParCSR matrix; P takes ownership of the
* data/i/j arrays assigned below. */
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* Truncation may reallocate the CSR arrays; refresh local pointers. */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
/* Reset previous-level F-points (-2, -3) to ordinary F-points (-1). */
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
|
transformations.c | #include <stdio.h>
#include "opari_omp.h"
/* Emit one trace line "--- <tid>: <msg>[ <val>]" tagged with the calling
 * OpenMP thread's id. Passing val == -1 suppresses the numeric suffix.
 * stdout is flushed immediately so output from concurrent threads shows
 * up promptly. */
static void log(const char* msg, int val) {
const int thread_id = omp_get_thread_num();
printf("--- %3d: %s", thread_id, msg);
if (val != -1) {
printf(" %d", val);
}
printf("\n");
fflush(stdout);
}
const int iterations = 4;
/* Exercises one instance of each OpenMP construct (parallel regions,
 * worksharing loops/sections, barriers, critical/atomic/single/master,
 * combined parallel worksharing, and the lock API) so that an
 * instrumentation tool can be validated against them. The
 * "#pragma omp inst ..." directives are presumably OPARI user-region
 * instrumentation markers (see opari_omp.h) — not standard OpenMP;
 * a plain OpenMP compiler will ignore or reject them. */
int main() {
int i, k = 0;
omp_lock_t lck;
#pragma omp inst init
log("sequential", 0);
/* ---- plain parallel region ---- */
#pragma omp parallel
{
log("parallel", 0);
}
log("sequential", 1);
/* ---- large parallel region ---- */
#pragma omp parallel
{
log("parallel", 1);
#pragma omp inst begin(worksharing)
/* ---- worksharing for loop without synchronisation ---- */
#pragma omp for nowait
for(i=0; i<iterations; ++i) {
log("for nowait iteration", i);
}
/* ---- user specified barrier ---- */
#pragma omp barrier
/* ---- worksharing for loop with implicit synchronisation ---- */
#pragma omp for
for(i=0; i<iterations; ++i) {
log("for iteration", i);
}
/* ---- worksharing tasks without synchronisation ---- */
#pragma omp sections nowait
{
#pragma omp section
log("section nowait", 1);
#pragma omp section
{
log("section nowait", 2);
}
}
/* ---- worksharing tasks with implicit synchronisation ---- */
#pragma omp sections
{
#pragma omp section
{
log("section", 1);
}
#pragma omp section
log("section", 2);
}
#pragma omp inst end(worksharing)
#pragma omp inst begin(synchronisation)
/* ---- critical section ---- */
#pragma omp critical
{
log("critical\n", -1);
k += 1;
}
/* ---- named critical section ---- */
#pragma omp critical(kincr)
{
log("critical\n", -1);
k += 1;
}
/* ---- atomic expression ---- */
#pragma omp atomic
k += 1;
/* ---- update k just once without synchronisation ---- */
#pragma omp single nowait
{
log("single nowait\n", -1);
k += 1;
}
/* ---- update k just once with implicit synchronisation ---- */
#pragma omp single
{
log("single\n", -1);
k += 1;
}
/* master: only thread 0 executes; no implied barrier */
#pragma omp master
{
log("master\n", -1);
printf("k = %d\n", k);
k = 0;
}
#pragma omp inst end(synchronisation)
} /* end parallel ---- */
log("sequential", 2);
#pragma omp inst begin(parallelworksharing)
/* ---- combined parallel worksharing for loop ---- */
#pragma omp parallel for \
reduction(+:k) \
private(i) \
schedule(dynamic)
for(i=0; i<iterations; ++i) {
log("pfor", i);
}
log("sequential", 3);
/* ---- combined parallel worksharing tasks ---- */
#pragma omp parallel sections
{
#pragma omp section
log("psection", 1);
#pragma omp section
log("psection", 2);
}
log("sequential", 4);
#pragma omp inst end(parallelworksharing)
/* ---- OpenMP locking API ---- */
#pragma omp inst begin(locking)
omp_init_lock(&lck);
#pragma omp parallel shared(lck)
{
/* blocking acquire/release, then spin on the non-blocking test;
* omp_test_lock returns nonzero once the lock is acquired */
omp_set_lock(&lck);
log("got lock", -1);
omp_unset_lock(&lck);
while (! omp_test_lock(&lck)) {
log("skipping", -1);
}
log("working", -1);
omp_unset_lock(&lck);
}
omp_destroy_lock(&lck);
#pragma omp flush(k)
#pragma omp inst end(locking)
return 0;
}
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N RRR L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/exception-private.h"
#include "magick/enhance.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"

  CacheView
    *combine_view;

  const Image
    *next;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the images are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if ((next->columns != image->columns) || (next->rows != image->rows))
      ThrowImageException(OptionError,"ImagesAreNotTheSameSize");
  }
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&combine_image->exception);
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(combine_image,sRGBColorspace);
  if ((channel & OpacityChannel) != 0)
    combine_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(combine_image);
  /*
    Combine images: for each output row, consume the matching row of each
    image in the list in turn, writing its intensity into the next requested
    channel (Red, Green, Blue, Opacity, then the CMYK index channel).
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    PixelPacket
      *pixels;

    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    if (((channel & RedChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            /* Bug fix: the cache view was leaked here and the read failure
               was silently ignored. */
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          /* NOTE(review): intensity is computed with the base image's
             settings, not next's -- confirm this is intended. */
          SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & GreenChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & BlueChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) && (next != (Image *) NULL))
      {
        IndexPacket
          *indexes;

        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(combine_view);
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CombineImageTag,progress++,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (IsGrayColorspace(combine_image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(combine_image,sRGBColorspace);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Report whether the image's alpha (matte) channel is active; returns
  MagickFalse for plain RGB/CMYK images without transparency.
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  MagickBooleanType
    alpha_is_active;

  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  alpha_is_active=image->matte;
  return(alpha_is_active);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImageChannel() separates a channel from the image and returns it as
% a grayscale image. A channel is a particular color component of each pixel
% in the image.
%
% The formats of the SeparateImage and SeparateImageChannel methods are:
%
% Image *SeparateImage(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% MagickBooleanType SeparateImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
*/
/*
  Return a new grayscale image holding the requested channel of 'image',
  or NULL on failure (errors reported through 'exception').
*/
MagickExport Image *SeparateImage(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *separate_image;

  /*
    Validate arguments.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Clone the source, extract the channel in place, discard on failure.
  */
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SeparateImageChannel(separate_image,channel) == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}
/*
  Convert 'image' in place into a grayscale rendition of one channel by
  replicating that channel into red, green and blue.  GrayChannels is the
  exception: it copies pixel intensity into the alpha channel and leaves
  matte enabled.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType SeparateImageChannel(Image *image,
const ChannelType channel)
{
#define SeparateImageTag "Separate/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Pixels are rewritten individually, so the image must be DirectClass. */
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (channel == GrayChannels)
image->matte=MagickTrue;
/*
Separate image channels.  Rows are processed in parallel; each iteration
touches only its own row through the cache view.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
/* Replicate the selected channel into R, G and B so the row reads as
   grayscale. */
switch (channel)
{
case RedChannel:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
q++;
}
break;
}
case GreenChannel:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelGreen(q));
SetPixelBlue(q,GetPixelGreen(q));
q++;
}
break;
}
case BlueChannel:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelBlue(q));
SetPixelGreen(q,GetPixelBlue(q));
q++;
}
break;
}
case OpacityChannel:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelOpacity(q));
SetPixelGreen(q,GetPixelOpacity(q));
SetPixelBlue(q,GetPixelOpacity(q));
q++;
}
break;
}
case BlackChannel:
{
/* The black (K) channel lives in the index queue; only meaningful for
   CMYK or PseudoClass images. */
if ((image->storage_class != PseudoClass) &&
(image->colorspace != CMYKColorspace))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelIndex(indexes+x));
SetPixelGreen(q,GetPixelIndex(indexes+x));
SetPixelBlue(q,GetPixelIndex(indexes+x));
q++;
}
break;
}
case TrueAlphaChannel:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelAlpha(q));
SetPixelGreen(q,GetPixelAlpha(q));
SetPixelBlue(q,GetPixelAlpha(q));
q++;
}
break;
}
case GrayChannels:
{
/* Copy intensity into alpha; colors are left untouched. */
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q)));
q++;
}
break;
}
default:
break;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SeparateImageChannel)
#endif
proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/* The result is grayscale; drop matte except for GrayChannels, which
   deliberately keeps the alpha it just wrote. */
if (channel != GrayChannels)
image->matte=MagickFalse;
(void) SetImageColorspace(image,GRAYColorspace);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channels to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a list of grayscale images, one per channel selected by 'channel'.
  Bug fix: each CloneImage() result is now checked before use; a failed
  clone (e.g. a resource limit) is skipped instead of being passed to
  SeparateImageChannel(), which asserts on / dereferences NULL.
*/
MagickExport Image *SeparateImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  if ((channel & RedChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,RedChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & GreenChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,GreenChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & BlueChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlueChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlackChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & AlphaChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,TrueAlphaChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelType alpha_type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% CopyAlphaChannel, DeactivateAlphaChannel, ExtractAlphaChannel,
% OpaqueAlphaChannel, ResetAlphaChannel, SetAlphaChannel,
% ShapeAlphaChannel, and TransparentAlphaChannel.
%
*/
/*
  Activate, deactivate, reset, or set the alpha channel according to
  'alpha_type'.  Returns MagickTrue on success; on success the pixel cache
  is synchronized before returning.
*/
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
const AlphaChannelType alpha_type)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
status=MagickTrue;
switch (alpha_type)
{
case ActivateAlphaChannel:
{
image->matte=MagickTrue;
break;
}
case BackgroundAlphaChannel:
{
CacheView
*image_view;
ExceptionInfo
*exception;
IndexPacket
index;
MagickBooleanType
status;
/* Note: this inner 'status' deliberately shadows the outer one; the
   case returns directly and never reaches the common epilogue. */
MagickPixelPacket
background;
PixelPacket
pixel;
ssize_t
y;
/*
Set transparent pixels to background color.
*/
if (image->matte == MagickFalse)
break;
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
break;
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
NULL,&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
index=0;
SetPixelPacket(image,&background,&pixel,&index);
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
/* Only fully transparent pixels are painted with the background. */
for (x=0; x < (ssize_t) image->columns; x++)
{
if (q->opacity == TransparentOpacity)
{
SetPixelRed(q,pixel.red);
SetPixelGreen(q,pixel.green);
SetPixelBlue(q,pixel.blue);
}
q++;
}
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,index);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
case CopyAlphaChannel:
case ShapeAlphaChannel:
{
/*
Special usage case for SeparateImageChannel(): copy grayscale color to
the alpha channel.
*/
status=SeparateImageChannel(image,GrayChannels);
image->matte=MagickTrue; /* make sure transparency is now on! */
if (alpha_type == ShapeAlphaChannel)
{
MagickPixelPacket
background;
/*
Reset all color channels to background color.
*/
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *)
NULL,&background);
(void) LevelColorsImage(image,&background,&background,MagickTrue);
}
break;
}
case DeactivateAlphaChannel:
{
image->matte=MagickFalse;
break;
}
case ExtractAlphaChannel:
{
/* Turn the alpha channel into grayscale color, then drop matte. */
status=SeparateImageChannel(image,TrueAlphaChannel);
image->matte=MagickFalse;
break;
}
case RemoveAlphaChannel:
case FlattenAlphaChannel:
{
CacheView
*image_view;
ExceptionInfo
*exception;
IndexPacket
index;
MagickBooleanType
status;
/* As above, this inner 'status' shadows the outer one on purpose. */
MagickPixelPacket
background;
PixelPacket
pixel;
ssize_t
y;
/*
Flatten image pixels over the background pixels.
*/
if (image->matte == MagickFalse)
break;
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
break;
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
NULL,&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
index=0;
SetPixelPacket(image,&background,&pixel,&index);
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
/* "Over" composite of each pixel onto the background color, with
   gamma normalizing by the combined coverage. */
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma,
opacity;
gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity;
opacity=(double) QuantumRange*(1.0-gamma);
gamma=PerceptibleReciprocal(gamma);
q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red,
(MagickRealType) q->opacity,(MagickRealType) pixel.red,
(MagickRealType) pixel.opacity));
q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green,
(MagickRealType) q->opacity,(MagickRealType) pixel.green,
(MagickRealType) pixel.opacity));
q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue,
(MagickRealType) q->opacity,(MagickRealType) pixel.blue,
(MagickRealType) pixel.opacity));
q->opacity=ClampToQuantum(opacity);
q++;
}
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,index);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
case ResetAlphaChannel: /* deprecated */
case OpaqueAlphaChannel:
{
status=SetImageOpacity(image,OpaqueOpacity);
break;
}
case SetAlphaChannel:
{
/* Only initialize opacity when the matte channel is not yet active. */
if (image->matte == MagickFalse)
status=SetImageOpacity(image,OpaqueOpacity);
break;
}
case TransparentAlphaChannel:
{
status=SetImageOpacity(image,TransparentOpacity);
break;
}
case UndefinedAlphaChannel:
break;
}
if (status == MagickFalse)
return(status);
return(SyncImagePixelCache(image,&image->exception));
}
|
parallel_measurement.c | /*
Blocked matrix multiplication using OpenMP : Parallel Implementation
Author : Omkar Damle.
Date : August 2016.
*/
#include<stdio.h>
#include<math.h>
#include<omp.h>
#include<time.h>
#include<string.h>
#include<stdlib.h>
// Using the MONOTONIC clock
#define CLK CLOCK_MONOTONIC
/* Function to compute the difference between two points in time */
struct timespec diff(struct timespec start, struct timespec end);
/*
Function that computes the difference between two time instances
Taken from - http://www.guyrutenberg.com/2007/09/22/profiling-code-using-clock_gettime/
Further reading:
http://stackoverflow.com/questions/6749621/how-to-create-a-high-resolution-timer-in-linux-to-measure-program-performance
http://stackoverflow.com/questions/3523442/difference-between-clock-realtime-and-clock-monotonic
*/
/*
  Return end - start as a normalized timespec (0 <= tv_nsec < 1e9).
  Assumes end >= start.
*/
struct timespec diff(struct timespec start, struct timespec end){
struct timespec delta;
long nsec = end.tv_nsec - start.tv_nsec;
if (nsec < 0) {
/* Borrow one second from the seconds field. */
delta.tv_sec = end.tv_sec - start.tv_sec - 1;
delta.tv_nsec = nsec + 1000000000L;
} else {
delta.tv_sec = end.tv_sec - start.tv_sec;
delta.tv_nsec = nsec;
}
return delta;
}
/*
  Blocked OpenMP matrix multiplication benchmark: multiplies two n x n
  all-ones matrices and prints CSV timing to stdout.
  Fixes over the original:
    - outputFileName was a 50-byte buffer but the formatted name is longer
      (heap smash); now 256 bytes written with snprintf.
    - block_size = n/2 was 0 for n < 2 (infinite loop) and the block loops
      indexed past row/column n-1 for odd n; both are now guarded.
    - the '#pragma omp critical' around the inner update serialized the
      whole multiplication; it is unnecessary because each (k,l) iteration
      owns exactly one output cell c[i+k][j+l].
    - the fopen() handle and the malloc'd rows were leaked.
*/
int main(int argc, char* argv[])
{
  struct timespec start_e2e, end_e2e, start_alg, end_alg, e2e, alg;
  /* Should start before anything else */
  clock_gettime(CLK, &start_e2e);
  /* Check if enough command-line arguments are taken in. */
  if(argc < 3){
    printf( "Usage: %s n p \n", argv[0] );
    return -1;
  }
  int n=atoi(argv[1]); /* matrices are n x n */
  int p=atoi(argv[2]); /* number of threads */
  char *problem_name = "matrix_multiplication";
  char *approach_name = "omp_parallel";
  FILE* outputFile;
  /* 256 bytes: the formatted name below overran the old 50-byte buffer. */
  char outputFileName[256];
  snprintf(outputFileName,sizeof(outputFileName),
    "output/%s_%s_%s_%s_output.txt",problem_name,approach_name,argv[1],argv[2]);
  int *a[n],*b[n],*c[n];
  /* counters for loops */
  int i,j,k,l,m=0;
  /* Allocate and initialize: a = b = all ones, c = zero. */
  for(i = 0;i < n;i++){
    a[i] = (int *) malloc(n * sizeof(int));
    b[i] = (int *) malloc(n * sizeof(int));
    c[i] = (int *) malloc(n * sizeof(int));
    for(j = 0; j < n; j++){
      a[i][j] = 1;
      b[i][j] = 1;
      c[i][j] = 0;
    }
  }
  clock_gettime(CLK, &start_alg); /* Start the algo timer */
  /*----------------------Core algorithm starts here---------------------*/
  /* Guard: n/2 is 0 for n < 2, which would make i += block_size loop
     forever. */
  int block_size = (n >= 2) ? n/2 : 1;
  omp_set_num_threads(p);
  /* Blocked matrix multiplication.  The (k,l) iteration space of each
     block is split among threads; every (k,l) pair writes a distinct
     c[i+k][j+l], so no critical section is needed. */
  for (i = 0; i < n; i += block_size)
  {
    for (j = 0; j < n; j += block_size)
    {
      #pragma omp parallel for private(k,l,m) collapse(2)
      for (k = 0; k < block_size; k++)
      {
        for (l = 0; l < block_size; l++)
        {
          /* Clip to the matrix edge so odd n cannot index out of
             bounds. */
          if ((i + k >= n) || (j + l >= n))
            continue;
          for (m = 0; m < n; m++)
          {
            c[i + k][j + l] += a[i + k][m] * b[m][j + l];
          }
        }
      }
    }
  }
  /*----------------------Core algorithm finished------------------------*/
  clock_gettime(CLK, &end_alg); /* End the algo timer */
  /* Ensure that only the algorithm is present between these two
     timers.  Further, the whole algorithm should be present. */
  /* Should end before anything else (printing comes later) */
  clock_gettime(CLK, &end_e2e);
  e2e = diff(start_e2e, end_e2e);
  alg = diff(start_alg, end_alg);
  outputFile = fopen(outputFileName,"w");
  /* problem_name,approach_name,n,p,e2e_sec,e2e_nsec,alg_sec,alg_nsec
     p should be 0 for serial codes!! */
  printf("%s,%s,%d,%d,%ld,%ld,%ld,%ld\n", problem_name, approach_name, n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec);
  /* Release resources the original leaked. */
  if (outputFile != NULL)
    fclose(outputFile);
  for(i = 0;i < n;i++){
    free(a[i]);
    free(b[i]);
    free(c[i]);
  }
  return 0;
}
|
fasta2fastq.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <assert.h>
#include <time.h>
#include "file_buffer.h"
#include "fasta2fastq.h"
#include "lineindex_lib.h"
#include <ctype.h>
#include <omp.h>
/* Global run-time configuration (buffer sizes, thread count, qv offset),
   populated from the command line in main(). */
runtime_options options;
/* Per-thread output staging area: each OpenMP thread formats its share of
   fastq records here; the main thread then flushes the buffers to stdout
   in thread order. */
typedef struct {
size_t size; /* allocated capacity of base, in bytes */
size_t used; /* bytes currently written into base */
char * base; /* heap-allocated output buffer */
} thread_buffer;
/* One buffer per thread; allocated in main() once options.threads is known. */
thread_buffer * thread_buffers;
/*
  Convert a space-separated list of integer quality values, in place, into
  a packed string of quality characters (value + 34 each).  Leading and
  inter-token whitespace is skipped; the encoded string is always at most
  as long as the input.
  NOTE(review): the offset 34 is hard-coded even though the program
  requires a --qv-offset argument (options.qv_offset) -- confirm whether
  this should honour the configured offset instead.
*/
void to_ascii_33(char * s) {
char * token=s;   /* start of the numeric token being scanned */
char * scan=s;
int out=0;        /* next write position within s */
/* Skip any whitespace before the first value. */
while (isspace(*scan) && *scan!='\0') { scan++; };
while (*scan!='\0') {
if (*scan!=' ') {
scan++;
continue;
}
/* Terminate the token, encode it, and advance to the next token. */
*scan='\0';
s[out++]=(char)(34+(char)atoi(token));
scan++;
token=scan;
while (isspace(*scan) && *scan!='\0') { scan++; };
}
/* Encode the trailing token, if any. */
if (*token!='\0') {
s[out++]=(char)(34+(char)atoi(token));
}
s[out]='\0';
return;
}
/*
  Advance fb->unseen_start just past the last line consumed this round so
  the circular buffer space can be reclaimed.  'lines_processed' is the
  number of lines just emitted from line index 'li'.
  NOTE(review): assumes li->table entries point into fb->base's circular
  storage -- confirm against lineindex_lib.
*/
static void inline update_last_line(lineindex_table * li, file_buffer * fb, int lines_processed ) {
//size_t lines_processed=li->end-li->start;
char * qual_last_line=li->table[(li->start+lines_processed-1)%li->size];
//fprintf(stderr,"last line |%s|\n",qual_last_line);
//fprintf(stderr,"last %lu\n",qual_last_line+strlen(qual_last_line)-fb->base);
/* Offsets of the last consumed line and of the current read cursor within
   the circular buffer. */
size_t qual_last_line_mod=qual_last_line-fb->base;
size_t qual_start_mod=fb->unseen_start%fb->size;
if (qual_last_line_mod >= qual_start_mod) {
fb->unseen_start+=qual_last_line_mod-qual_start_mod;
} else {
/* The last line wrapped around the end of the circular buffer. */
fb->unseen_start+=fb->size-(qual_start_mod-qual_last_line_mod);
}
/* Step over the body of the (NUL-terminated) last line itself. */
while(fb->base[fb->unseen_start%fb->size]!='\0' && fb->unseen_start<fb->unseen_end) {
fb->unseen_start++;
}
//while(fb->base[fb->unseen_start%fb->size]=='\0' && fb->unseen_start<fb->unseen_end) {
// fb->unseen_start++;
//}
return;
}
/*
  Print the command-line help text to stderr and terminate the program
  with a non-zero exit status.  's' is argv[0].  Never returns.
*/
void usage(char * s) {
FILE * out=stderr;
fprintf(out, "usage: %s [options/parameters] <fasta> <qual>\n", s);
fprintf(out, " <fasta> The fasta filename to read in\n");
fprintf(out, " <qual> The qual file corresponding to the fasta file\n");
fprintf(out, "Required:\n");
fprintf(out, " --qv-offset The ASCII offset for the integer values in the qual file\n");
fprintf(out, "Parameters: (all sizes are in bytes unless specified)\n");
fprintf(out, " --buffer-size File buffer size in memory per file (Default: %d)\n", DEF_BUFFER_SIZE);
fprintf(out, " --read-size Read size, read into buffer with this (Default: %d)\n", DEF_READ_SIZE);
fprintf(out, "\nOptions:\n");
fprintf(out, " --help This usage screen\n");
exit(1);
}
/* getopt_long() option table.  The flag field is 0, so getopt_long()
   returns the last field (5..8) directly; those codes match the switch
   cases in main().  --buffer-size, --read-size and --qv-offset take a
   required argument (second field = 1). */
struct option long_op[] =
{
{"help", 0, 0, 5},
{"buffer-size", 1, 0, 6},
{"read-size", 1, 0, 7},
{"qv-offset",1,0,8},
{0,0,0,0}
};
/*
  Drain the underlying file into the circular file_buffer until the buffer
  is full or the file is exhausted.  Returns true when new bytes were
  added.  Exits with an error if no progress can be made on a non-EOF
  stream (the buffer is too small to hold a single read).
*/
static inline bool fill_fb(file_buffer * fb) {
time_t io_start_time=time(NULL); /* wall-clock timing for the IO log line */
fprintf(stderr,"IO start ... ");
bool has_changed=false;
while (!fb->exhausted) {
fill_read_buffer(&fb->frb);
add_read_buffer_to_main(fb);
/* Nothing consumed, nothing changed, and not at EOF: we are stuck. */
if (!fb->exhausted && !fb->changed && fb->frb.eof==0) {
fprintf(stderr,"too small buffer!\n");
exit(1);
}
has_changed=has_changed || fb->changed;
}
fprintf(stderr,"IO end ... %lu seconds\n",(time(NULL)-io_start_time));
return has_changed;
//fprintf(stdout,"Filled %lu to %lu of %lu |%s|\n",fb->unseen_start, fb->unseen_end, fb->size,fb->base);
}
/*
  Top up the file buffer and index any newly-read bytes into the master
  line index 'li' using the per-thread index tables.  The buffer is
  circular, so a refill that wraps past the end is indexed in two pieces.
  '#' is the comment character passed through to the line indexer.
*/
static void inline fill_fb_and_index(lineindex_table * li, lineindex_table ** thread_lineindexes, file_buffer * fb) {
size_t old_em=fb->unseen_end%fb->size; /* write position before the refill */
if (fill_fb(fb)) {
size_t newly_added;
size_t em=fb->unseen_end%fb->size; /* write position after the refill */
if (em > old_em) {
/* New data is one contiguous region. */
newly_added=add_lineindex_from_memory_threaded(li, thread_lineindexes,fb->base+old_em, em-old_em,options.threads, '#');
} else {
/* New data wrapped: index the tail segment, then the head segment. */
newly_added=add_lineindex_from_memory_threaded(li, thread_lineindexes,fb->base+old_em, fb->size-old_em,options.threads, '#');
newly_added+=add_lineindex_from_memory_threaded(li, thread_lineindexes,fb->base, em,options.threads, '#');
}
if (newly_added>0) {
/* New complete lines appeared; mark the buffer worth scanning again. */
fb->exhausted=false;
}
}
return;
}
/*
  Parse a byte-size string of the form "<digits>[K|M|G]" (suffix optional,
  now case-insensitive) and return the size in bytes.  Returns 0 for
  missing, zero, or negative values -- callers treat 0 as invalid.
  Fixes over the original: values above INT_MAX no longer overflow atoi(),
  and the input string is no longer temporarily modified (the old code
  wrote a NUL over the suffix and restored it afterwards).
*/
static size_t inline string_to_byte_size(char * s) {
while (isspace((unsigned char)*s)) { s++; };
/* Negative sizes were rejected by the old atoi() <= 0 check. */
if (*s=='-') {
return 0;
}
char * suffix=s;
unsigned long long value=strtoull(s,&suffix,10);
if (suffix==s || value==0) {
return 0;
}
size_t multiplier=1;
switch (*suffix) {
case 'K': case 'k': multiplier=1024; break;
case 'M': case 'm': multiplier=1024*1024; break;
case 'G': case 'g': multiplier=1024*1024*1024; break;
default: break;
}
return (size_t)value*multiplier;
}
/*
  fasta2fastq driver: stream a fasta file and its matching qual file,
  pairing lines and emitting fastq records to stdout.  Threads format
  their share of each batch into private buffers, which are flushed in
  order by the main thread.
  Fixes over the original:
    - getopt_long() returns int; storing it in a plain char broke the
      'c != EOF' test on platforms where char is unsigned (infinite loop).
    - the worker '#pragma omp parallel' region now requests
      num_threads(options.threads): the per-thread arrays are sized by
      options.threads, so a larger default OpenMP team indexed past them.
    - the "at least one sam file" error message was copied from another
      tool; this program takes a fasta and a qual file.
    - the per-thread buffers are now freed on exit.
*/
int main (int argc, char ** argv) {
  /* Defaults; may be overridden by the command-line options below. */
  options.buffer_size=DEF_BUFFER_SIZE;
  options.read_size=DEF_READ_SIZE;
  options.threads=1;
  options.qv_offset=DEF_QV_OFFSET;
  int op_id;
  char short_op[] = "N:";
  /* BUG FIX: was 'char c', which never compares equal to EOF (-1) when
     char is unsigned. */
  int c = getopt_long(argc, argv, short_op, long_op, &op_id);
  while (c != EOF) {
    switch (c) {
      case 6:
        options.buffer_size=string_to_byte_size(optarg);
        break;
      case 7:
        options.read_size=string_to_byte_size(optarg);
        break;
      case 5:
        usage(argv[0]);
        break;
      case 8:
        options.qv_offset=atoi(optarg);
        break;
      case 'N':
        options.threads=atoi(optarg);
        break;
      default:
        fprintf(stderr,"%d : %c , %d is not an option!\n",c,(char)c,op_id);
        usage(argv[0]);
        break;
    }
    c = getopt_long(argc, argv, short_op, long_op, &op_id);
  }
  if (options.qv_offset<=0) {
    fprintf(stderr,"Please specify a qv_offset. This is used when converting qual files into fastq format.\nFor SOLiD data this value will be most likely 34.\nFor Illumina data this value will be most likely 64, except for Illumina 1.8+ when it is 33.\n");
    usage(argv[0]);
    exit(1);
  }
  fprintf(stderr,"Set to %d threads!\n",options.threads);
  if (argc<=optind+1) {
    /* BUG FIX: the old message asked for "reads file and at least one sam
       file", copied from a different tool. */
    fprintf(stderr,"Please specify both a fasta and qual file!\n");
    usage(argv[0]);
  }
  argc-=optind;
  argv+=optind;
  if (argc!=2) {
    fprintf(stderr,"Please specify both a fasta and qual file!\n");
    usage(argv[0]);
  }
  /* Variables for IO of read names. */
  char * fasta_filename=argv[0];
  char * qual_filename=argv[1];
  fprintf(stderr,"Using %s as fasta reads filename and %s as qual filename\n",fasta_filename,qual_filename);
  argc-=2;
  argv+=2;
  /* Per-thread line indexes plus one master table for each input file. */
  lineindex_table * qual_thread_lineindexes[options.threads];
  int i;
  for (i=0; i<options.threads; i++) {
    qual_thread_lineindexes[i]=lineindex_init(1);
  }
  lineindex_table * qual_li = lineindex_init(1);
  lineindex_table * fasta_thread_lineindexes[options.threads];
  for (i=0; i<options.threads; i++) {
    fasta_thread_lineindexes[i]=lineindex_init(1);
  }
  lineindex_table * fasta_li = lineindex_init(1);
  /* Set up the per-thread output buffers. */
  thread_buffers=(thread_buffer*)malloc(sizeof(thread_buffer)*options.threads);
  if (thread_buffers==NULL) {
    fprintf(stderr,"Failed to malloc memory for thread buffers!\n");
    exit(1);
  }
  for (i=0; i<options.threads; i++ ) {
    thread_buffers[i].size=(options.buffer_size/options.threads+1000)*1.3;
    thread_buffers[i].base=(char*)malloc(sizeof(char)*thread_buffers[i].size);
    if (thread_buffers[i].base==NULL) {
      fprintf(stderr,"Failed to allocate memory for thread buffers!\n");
      exit(1);
    }
  }
  /* Open both inputs as circular file buffers. */
  fprintf(stderr,"Setting up buffer with size %lu and read_size %lu\n",options.buffer_size,options.read_size);
  file_buffer * qual_fb = fb_open(qual_filename,options.buffer_size,options.read_size);
  file_buffer * fasta_fb = fb_open(fasta_filename,options.buffer_size,options.read_size);
  size_t lines_processed=0;
  clock_t start_time=clock();
  clock_t last_time=clock();
  size_t iterations=0;
  bool first_loop=true;
  while (lines_processed!=0 || first_loop) {
    first_loop=false;
    /* Refill and index both files, then take the qual line count as the
       size of this batch. */
    fill_fb_and_index(qual_li, qual_thread_lineindexes,qual_fb);
    fill_fb_and_index(fasta_li, fasta_thread_lineindexes,fasta_fb);
    lines_processed=qual_li->end-qual_li->start;
    /* Partition the batch: thread i handles lines
       [start[i], start[i]+lines_to_print[i]). */
    int lines_to_print[options.threads];
    int start[options.threads];
    for (i=0; i<options.threads; i++) {
      start[i]=(i==0 ? 0 : start[i-1]+lines_to_print[i-1]);
      lines_to_print[i]=lines_processed/options.threads+(lines_processed%options.threads > i ? 1 : 0);
    }
    /* BUG FIX: request exactly options.threads workers; the arrays above
       are sized by options.threads, so a bigger default team would index
       out of bounds. */
    #pragma omp parallel num_threads(options.threads)
    {
      int thread_id = omp_get_thread_num();
      thread_buffer * ob = thread_buffers+thread_id;
      ob->used=0;
      ob->base[0]='\0';
      int i;
      for (i=start[thread_id]; i<start[thread_id]+lines_to_print[thread_id]; i++) {
        char * to_print=fasta_li->table[(fasta_li->start+i)%fasta_li->size];
        char * qual_string=qual_li->table[(qual_li->start+i)%qual_li->size];
        /* Grow this thread's buffer until the record fits. */
        while (strlen(to_print)+strlen(qual_string)+ob->used>ob->size) {
          ob->size*=1.3;
          ob->base=(char*)realloc(ob->base,sizeof(char)*ob->size);
          if (ob->base==NULL) {
            fprintf(stderr,"Failed to allocate memory for thread_buffer expand\n");
            exit(1);
          }
        }
        /* The qual stream alternates between ">"-prefixed read names and
           quality lines; emit "@name" or the sequence + "+" + quals. */
        if (qual_string[0]=='>') {
          ob->used+=sprintf(ob->base+ob->used,"@%s\n",fasta_li->table[(fasta_li->start+i)%fasta_li->size]+1);
        } else {
          to_ascii_33(qual_li->table[(qual_li->start+i)%qual_li->size]);
          ob->used+=sprintf(ob->base+ob->used,"%s\n",fasta_li->table[(fasta_li->start+i)%fasta_li->size]);
          ob->used+=sprintf(ob->base+ob->used,"+\n%s\n",qual_li->table[(qual_li->start+i)%qual_li->size]);
        }
      }
    }
    /* Flush the per-thread blocks to stdout in thread order. */
    for (i=0; i<options.threads; i++) {
      fprintf(stdout,"%s",thread_buffers[i].base);
    }
    /* Reclaim consumed buffer space and advance the line indexes. */
    update_last_line(qual_li,qual_fb,lines_processed);
    update_last_line(fasta_li,fasta_fb,lines_processed);
    qual_li->start+=lines_processed;
    fasta_li->start+=lines_processed;
    if (lines_processed>0) {
      qual_fb->exhausted=false;
      fasta_fb->exhausted=false;
    }
    iterations++;
    /* Progress report roughly four times a second of CPU time. */
    if ( (clock()-last_time)/options.threads > CLOCKS_PER_SEC/4) {
      double lines_per_second=qual_li->start/( (double)(clock()-start_time)/(CLOCKS_PER_SEC*options.threads));
      double lines_per_iteration=qual_li->start/(double)iterations;
      fprintf(stderr,"Processing overall at %lf reads / second, %lf reads / iteration, processed %lu, lines on this iteration %lu\n",lines_per_second,lines_per_iteration,qual_li->start,lines_processed);
      last_time=clock();
    }
  }
  /* Free the line-indexes. */
  for (i=0; i<options.threads; i++) {
    lineindex_destroy(qual_thread_lineindexes[i]);
    lineindex_destroy(fasta_thread_lineindexes[i]);
  }
  /* Free the master indexes. */
  lineindex_destroy(qual_li);
  lineindex_destroy(fasta_li);
  /* Free the per-thread output buffers (leaked in the original). */
  for (i=0; i<options.threads; i++) {
    free(thread_buffers[i].base);
  }
  free(thread_buffers);
  /* Close the file_buffers. */
  fb_close(qual_fb);
  fb_close(fasta_fb);
  return 0;
}
|
MM1fu.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "matrixUtils/matrixUtils.h"
#include "benchmarkUtils/timeUtils.h"
// Reserva de memoria
#define SIZE_DATA (1024*1024*64*3)
static double MEM_CHUNK[SIZE_DATA];
// Version 6. version del algoritmo 2 filas por 2 filas.
// Usage: prog N NUM_T — multiplies two NxN matrices using NUM_T OpenMP
// threads, timing the multiply with sampleStart()/sampleStop().
// Fix: in the original, j and k were declared at function scope, so they
// were SHARED across OpenMP threads inside the parallel for — a data race.
// All per-iteration variables are now declared inside the loop body.
int main(int argc, char **argv){
    if (argc < 3) {
        fprintf(stderr, "usage: %s N NUM_THREADS\n", argv[0]);
        return 1;
    }
    int N = (int) atoi(argv[1]);     // matrix size NxN
    int NUM_T = (int) atoi(argv[2]); // number of threads
    // Carve the three NxN matrices out of the static chunk; refuse sizes
    // that would overflow it.
    if (N <= 0 || (long long)3*N*N > (long long)SIZE_DATA) {
        fprintf(stderr, "invalid matrix size %d\n", N);
        return 1;
    }
    double *matrixA, *matrixB, *matrixC;
    matrixA = MEM_CHUNK;
    matrixB = matrixA + (N * N);
    matrixC = matrixB + (N * N);
    // The main process makes the init routines.
    matrixInitN(N, matrixA, matrixB, matrixC);
    omp_set_num_threads(NUM_T);
    sampleStart();
    // Matrix multiply with a 4-way unrolled dot product per output cell.
    // NOTE(review): ptrb walks row j of matrixB, i.e. this computes A*B'
    // unless matrixInitN stores B transposed — confirm against matrixUtils.
    int i;
    #pragma omp parallel for
    for(i=0; i<N; i++){
        int j;
        for(j=0; j<N; j++){
            double *ptra, *ptrb;
            double c0, c1, c2, c3;
            c0 = c1 = c2 = c3 = 0.0;
            ptra = matrixA + (i*N);
            ptrb = matrixB + (j*N);
            int k = N;
            // Peel leftover elements until the remaining count is a multiple of 4.
            while(k&3){
                c0 += (*ptra * *ptrb);
                k--;
                ptra++, ptrb++;
            }
            for(; k>0; k-=4, ptra+=4, ptrb+=4){
                c0 += (*ptra * *ptrb);
                c1 += *(ptra+1) * *(ptrb+1);
                c2 += *(ptra+2) * *(ptrb+2);
                c3 += *(ptra+3) * *(ptrb+3);
            }
            matrixC[j+i*N] = c0 + c1 + c2 + c3;
        }
    }
    sampleStop();
    printTime();
    return 0;
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
/* C += (A ? +B : -B): accumulate rows of B into C with the sign chosen by
   the binary matrix A. A is MxK (char 0/1), B is KxN, C is MxN, all
   row-major with leading dimensions lda/ldb/ldc. ALPHA is accepted for
   signature symmetry with the other gemm kernels but is unused here. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, inner, col;
    for (row = 0; row < M; ++row) {
        float *c_row = C + row*ldc;
        for (inner = 0; inner < K; ++inner) {
            float *b_row = B + inner*ldb;
            if (A[row*lda + inner]) {
                for (col = 0; col < N; ++col) c_row[col] += b_row[col];
            } else {
                for (col = 0; col < N; ++col) c_row[col] -= b_row[col];
            }
        }
    }
}
/* Allocate a rows x cols matrix filled with uniform random floats in [0,1].
   Caller owns the returned buffer (free()).
   Fix: the calloc result was dereferenced without a NULL check; allocation
   failure now aborts with a message instead of crashing. The cast also
   keeps the file compilable as C++. */
float *random_matrix(int rows, int cols)
{
    int i;
    float *m = (float*)calloc(rows*cols, sizeof(float));
    if (!m) {
        fprintf(stderr, "random_matrix: allocation of %d floats failed\n", rows*cols);
        exit(1);
    }
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}
/* Benchmark gemm_cpu on random matrices: 10 iterations of an m x k by
   k x n multiply with the given transpose flags, printing elapsed time.
   Fix: the printed value is (end-start)/CLOCKS_PER_SEC, i.e. SECONDS,
   but the label said "ms"; the label now matches the unit (and matches
   the GPU timing routine). The cast is double so %lf gets the full
   precision it expects. */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (double)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
// General matrix multiply: C = ALPHA * op(A) * op(B) + BETA * C, where
// op(X) is X or X' per the TA/TB transpose flags. Thin wrapper that
// forwards everything to the CPU implementation.
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
// C += ALPHA * A * B (no transposes). A is MxK, B is KxN, C is MxN,
// row-major with leading dimensions lda/ldb/ldc.
// Fix: j and k were declared at function scope, making them SHARED across
// OpenMP threads in the parallel for — a data race producing wrong results
// with >1 thread. They are now private per iteration. `register` dropped
// (deprecated; removed in C++17; ignored by modern compilers).
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int j, k;
        for(k = 0; k < K; ++k){
            // Hoist the scaled A element out of the inner loop.
            float A_PART = ALPHA*A[i*lda+k];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}
// C += ALPHA * A * B' : A is MxK, B is NxK (accessed transposed), C is MxN.
// Fix: j and k were function-scope and therefore SHARED across OpenMP
// threads — a data race. Now declared inside the parallel region.
// `register` dropped (deprecated; removed in C++17).
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int j, k;
        for(j = 0; j < N; ++j){
            float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
            }
            C[i*ldc+j] += sum;
        }
    }
}
// C += ALPHA * A' * B : A is KxM (accessed transposed), B is KxN, C is MxN.
// Fix: j and k were function-scope and therefore SHARED across OpenMP
// threads — a data race. Now declared inside the parallel region.
// `register` dropped (deprecated; removed in C++17).
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int j, k;
        for(k = 0; k < K; ++k){
            float A_PART = ALPHA*A[k*lda+i];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}
// C += ALPHA * A' * B' : both operands accessed transposed; C is MxN.
// Fix: j and k were function-scope and therefore SHARED across OpenMP
// threads — a data race. Now declared inside the parallel region.
// `register` dropped (deprecated; removed in C++17).
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int j, k;
        for(j = 0; j < N; ++j){
            float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
            }
            C[i*ldc+j] += sum;
        }
    }
}
/*
** gemm_cpu: C = ALPHA * op(A) * op(B) + BETA * C on the CPU.
** Scales C by BETA first, then dispatches to the kernel specialized for
** the transpose-flag combination (TA, TB).
*/
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    int row, col;
    for (row = 0; row < M; ++row) {
        for (col = 0; col < N; ++col) {
            C[row*ldc + col] *= BETA;
        }
    }
    if (TA) {
        if (TB) gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else    gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    } else {
        if (TB) gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else    gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
}
#ifdef DNETGPU
#include <math.h>
// C = ALPHA * op(A) * op(B) + BETA * C on the GPU via cuBLAS.
// cuBLAS is column-major, so the operands are swapped (B before A) and the
// transpose flags exchanged to produce a row-major result in C_gpu.
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
// NOTE(review): cublasSgemm returns cublasStatus_t, not cudaError_t; the
// implicit enum conversion compiles but mixes error domains — confirm that
// check_error() interprets this value correctly.
cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
check_error(status);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// Benchmark gemm_gpu on random host matrices: 32 iterations, printing the
// elapsed time in seconds.
// NOTE(review): the matrices passed here are host pointers, and timing uses
// clock() (CPU time) with no device synchronization — confirm this measures
// what is intended.
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
// Benchmark gemm_gpu on device copies of random matrices: `iter` timed
// iterations, synchronizing after each, reporting seconds and GFLOPS.
void time_gpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
// upload operands once; only the multiply is timed
float *a_cl = cuda_make_array(a, m*k);
float *b_cl = cuda_make_array(b, k*n);
float *c_cl = cuda_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
// NOTE(review): cudaThreadSynchronize is deprecated in favor of
// cudaDeviceSynchronize — confirm the target CUDA version.
cudaThreadSynchronize();
}
// 2*k multiply-adds (+2 for the accumulate) per output element, per iter
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
cuda_free(a_cl);
cuda_free(b_cl);
cuda_free(c_cl);
free(a);
free(b);
free(c);
}
// Compare gemm_gpu against gemm_cpu on the same random inputs and print the
// mean squared difference (labelled SSE) over all m*n outputs.
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *c_gpu = random_matrix(m,n);
// zero both accumulators so only the product is compared
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
free(a);
free(b);
free(c);
free(c_gpu);
}
// Smoke-test entry point: runs a fixed set of GPU gemm timings (the
// commented-out section preserves earlier accuracy/timing configurations).
// Always returns 0.
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,192,729,1600);
time_gpu(0,0,384,196,1728);
time_gpu(0,0,256,196,3456);
time_gpu(0,0,256,196,2304);
time_gpu(0,0,128,4096,12544);
time_gpu(0,0,128,4096,4096);
*/
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,576,12544);
time_gpu(0,0,256,2304,784);
time_gpu(1,1,2304,256,784);
time_gpu(0,0,512,4608,196);
time_gpu(1,1,4608,512,196);
return 0;
}
#endif
|
GB_binop__bclr_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bclr_uint64
// A.*B function (eWiseMult): GB_AemultB__bclr_uint64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bclr_uint64
// C+=b function (dense accum): GB_Cdense_accumb__bclr_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_uint64
// C=scalar+B GB_bind1st__bclr_uint64
// C=scalar+B' GB_bind1st_tran__bclr_uint64
// C=A+scalar GB_bind2nd__bclr_uint64
// C=A'+scalar GB_bind2nd_tran__bclr_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint64_t, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITCLR (x, y, uint64_t, 64) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_UINT64 || GxB_NO_BCLR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the element-wise work is
// generated by the included template using the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__bclr_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C. The work
// is partitioned into ntasks slices described by the kfirst/klast/pstart
// arrays; the loop body comes from the included template.
GrB_Info GB_Cdense_accumB__bclr_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. p_bwork points at a
// uint64_t scalar; the loop body comes from the included template.
GrB_Info GB_Cdense_accumb__bclr_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable — the inner block already returned (generated code).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the BITCLR operator. The slice
// workspace pointers are allocated inside the included template and
// released by GB_FREE_ALL (defined above).
GrB_Info GB_AaddB__bclr_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the BITCLR operator. Same slice
// workspace pattern as GB_AaddB; GB_FREE_ALL releases it.
GrB_Info GB_AemultB__bclr_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]) for every entry present in B: apply BITCLR with
// the scalar bound as the first argument. Cx and Bx may be aliased; Bb is
// the optional bitmap (NULL means all entries present).
GrB_Info GB_bind1st__bclr_uint64
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Bx = (uint64_t *) Bx_input ;
uint64_t x = (*((uint64_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only entries present in the bitmap are computed
if (GBB (Bb, p))
{
Cx [p] = GB_BITCLR (x, Bx [p], uint64_t, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y) for every entry present in A: apply BITCLR with
// the scalar bound as the second argument. Cx and Ax may be aliased; Ab is
// the optional bitmap (NULL means all entries present).
GrB_Info GB_bind2nd__bclr_uint64
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only entries present in the bitmap are computed
if (GBB (Ab, p))
{
Cx [p] = GB_BITCLR (Ax [p], y, uint64_t, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (x, aij, uint64_t, 64) ; \
}
// C = op (x, A'): transpose A and apply BITCLR with the scalar as the first
// argument. The per-entry work is GB_CAST_OP (redefined just above); the
// transpose machinery comes from the included template.
GrB_Info GB_bind1st_tran__bclr_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (aij, y, uint64_t, 64) ; \
}
// C = op (A', y): transpose A and apply BITCLR with the scalar as the
// second argument, via GB_CAST_OP (redefined just above) and the included
// transpose template.
GrB_Info GB_bind2nd_tran__bclr_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
forward_increment_lagrange_multiplier_scheme.h | //
// Project Name: Kratos
// Last Modified by: $Author: Nelson $
// Date: $Date: 2011-01-21 $
// Revision: $Revision: 1.0 $
//
//
#if !defined(FORWARD_INCREMENT_LAGRANGE_MULTIPLIER_SCHEME )
#define FORWARD_INCREMENT_LAGRANGE_MULTIPLIER_SCHEME
// System includes
#include <string>
#include <iostream>
#include <cmath>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
// External includes
#include "boost/smart_ptr.hpp"
// Project includes
#include "includes/define.h"
#include "includes/ublas_interface.h"
#include "includes/model_part.h"
#include "utilities/math_utils.h"
#include "custom_utilities/sd_math_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
class ForwardIncrementLagrangeMultiplierScheme
{
public:
///@name Type Definitions
///@{
typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
typedef ConditionsContainerType::iterator ConditionsContainerIterator;
typedef ConditionsContainerType::value_type ConditionsPointerType;
/// Pointer definition of ForwardIncrementLagrangeMultiplierScheme
KRATOS_CLASS_POINTER_DEFINITION(ForwardIncrementLagrangeMultiplierScheme);
///@}
///@name Life Cycle
///@{
/// Default constructor (leaves the scheme unbound to any model part).
ForwardIncrementLagrangeMultiplierScheme() {}
/// Binds the scheme to a model part; `dimension` selects whether Z
/// components are updated in the correction loops (3 → update Z).
ForwardIncrementLagrangeMultiplierScheme(ModelPart& model_part, const unsigned int& dimension
) : mr_model_part(model_part), mrdimension(dimension)
{
}
/// Destructor.
virtual ~ForwardIncrementLagrangeMultiplierScheme() {}
/// Iterates the simultaneous Jacobi scheme over the contact conditions in
/// [end_previos, end_actual): each pass updates the Lagrange multipliers
/// (STEP1), applies the resulting displacement corrections (STEP2) and
/// checks convergence on the multiplier norms (STEP3), until either the
/// absolute test (EPS) or the relative-change test (ERROR) passes, or
/// `max` iterations are reached. Finally moves the mesh to the corrected
/// configuration.
void CalculateContactForceAndDisplacementCorrections(
const double& alfa_damp,
const double& mid_time_step,
const ConditionsContainerIterator& end_previos,
const ConditionsContainerIterator& end_actual
)
{
std::cout<<std::endl;
std::cout<< " Simultaneous Jacobi Iteration Method" << std::endl;
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
//ConditionsContainerType& pConditions = mr_model_part.ConditionsArray();
//const double current_delta_time = CurrentProcessInfo[DELTA_TIME];
const unsigned int max = 200;
unsigned int iter = 0;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
// split the condition range into one contiguous chunk per thread
vector<unsigned int> condition_partition;
int distance = std::distance(end_previos, end_actual);
CreatePartition(number_of_threads, distance, condition_partition);
double Rigth_Term, Left_Term;
double Old_Left_Term, Old_Rigth_Term;
double relative_error, relative_error1, relative_error2 ;
bool is_converged = false;
bool is_converged_1 = false;
bool is_converged_2 = false;
const double EPS = 1E-9;
const double ERROR = 1E-6;
Rigth_Term = 0.00;
Left_Term = 0.00;
Old_Left_Term = 0.00;
Old_Rigth_Term = 0.00;
relative_error = 0.00;
relative_error1 = 0.00;
relative_error2 = 0.00;
while(is_converged ==false && ++iter < max )
{
//STEP1: one Jacobi update of lambdas/contact displacement per condition
#pragma omp parallel for //shared(alfa_damp, mid_time_step, CurrentProcessInfo)
for(int k=0; k<number_of_threads; k++)
{
ConditionsContainerType::iterator it_begin = end_previos + condition_partition[k];
ConditionsContainerType::iterator it_end = end_previos + condition_partition[k+1];
for(ConditionsContainerType::iterator it= it_begin; it!=it_end; ++it)
JacobiIteration(alfa_damp, mid_time_step, *it, CurrentProcessInfo);
}
//STEP2: fold the accumulated contact corrections into DISPLACEMENT
UpadateDisplacement();
//STEP3: accumulate ||delta_lambda|| (left) and ||lambda|| (right) norms
Old_Left_Term = Left_Term;
Old_Rigth_Term = Rigth_Term;
Rigth_Term = 0.00;
Left_Term = 0.00;
#pragma omp parallel for reduction(+:Rigth_Term) reduction(+:Left_Term)
for(int k=0; k<number_of_threads; k++)
{
ConditionsContainerType::iterator it_begin=end_previos + condition_partition[k];
ConditionsContainerType::iterator it_end=end_previos+condition_partition[k+1];
for (ConditionsContainerType::iterator it= it_begin; it!=it_end; ++it)
CkeckConvergence(*it, Rigth_Term, Left_Term);
}
Rigth_Term = std::sqrt(Rigth_Term);
Left_Term = std::sqrt(Left_Term);
// relative change of each norm since the previous sweep
if(Left_Term!=0.00)
relative_error1 = std::fabs((Left_Term - Old_Left_Term ) / ( Left_Term));
else
relative_error1 = 0.00;
if(Rigth_Term!=0.00)
relative_error2 = std::fabs((Rigth_Term - Old_Rigth_Term) / ( Rigth_Term));
else
relative_error2 = 0.00;
relative_error = relative_error1 + relative_error2;
is_converged_1 = IsConverged(EPS, Rigth_Term, Left_Term);
is_converged_2 = (relative_error < ERROR);
if( is_converged_1==true || is_converged_2==true)
is_converged = true;
}
MoveMeshAgain();
std::cout << " Number of Iterations = " << iter << std::endl;
std::cout << " Tolerance ( EPS ) = " << EPS << std::endl;
std::cout << " Required Tolerance = " << EPS*Rigth_Term << std::endl;
std::cout << " Achieved Tolerance = " << Left_Term << std::endl;
std::cout << " Relative Error = " << relative_error << std::endl;
if (iter==max)
std::cout<< " NOT CONVERGENCE FOR CONTACT!!!!!!!!" << std::endl;
std::cout<< std::endl;
}
/// Computes the Lagrange multipliers (lambdas) and the contact displacement.
/// One Jacobi update for a single contact condition: builds the constraint
/// row, forms and inverts the damped lumped-mass system, computes the
/// multiplier increment delta_lambda, clamps the multiplier to be
/// non-positive (no adhesion), and — while contact is active — accumulates
/// the resulting displacement correction on the condition's nodes.
void JacobiIteration( const double& alfa_damp,
const double& mid_time_step,
const ConditionsPointerType& rCond, ProcessInfo& CurrentProcessInfo)
{
const double current_delta_time = CurrentProcessInfo[DELTA_TIME];
double lamda_old = 0.00;
Vector Constraint;
Matrix Constraint_Matrix;
Matrix Mass;
Matrix InvMass;
Matrix Aux;
Matrix InvAux;
Vector Displ;
Vector& lambdas = (rCond)->GetValue(LAMBDAS);
Vector& delta_lambdas = (rCond)->GetValue(DELTA_LAMBDAS);
// constraint vector reshaped into a 1 x n row matrix
ComputeConstraintVector(rCond, Constraint);
Constraint_Matrix.resize(1, Constraint.size());
noalias(Constraint_Matrix) = ZeroMatrix(1, Constraint.size());
for(unsigned int i = 0; i<Constraint.size(); i++ )
Constraint_Matrix(0,i) = Constraint[i];
// scale the (diagonal) mass matrix with the time-step/damping factor
rCond->CalculateMassMatrix(Mass, CurrentProcessInfo);
double auxmass = 0.00;
for(unsigned int i = 0; i<Mass.size1(); i++)
{
auxmass = Mass(i,i);
Mass(i,i) = ((1.00/mid_time_step) + (alfa_damp*current_delta_time)/(2.00 * mid_time_step))*auxmass;
}
InvertDiagonalMatrix(Mass , InvMass);
// Aux = G * M^-1 * G'  (1x1 here), then invert it
Aux.resize(Constraint_Matrix.size1(), Constraint_Matrix.size1(), false);
Aux = ZeroMatrix(Constraint_Matrix.size1(), Constraint_Matrix.size1());
noalias(Aux) = prod(Matrix(prod(Constraint_Matrix,InvMass)), (trans(Constraint_Matrix)));
InvAux.resize(Aux.size1(), Aux.size1(),false);
InvAux = ZeroMatrix(Aux.size1(), Aux.size1());
SD_MathUtils<double>::InvertMatrix(Aux,InvAux);
// delta_lambda = (1/dt) * (G M^-1 G')^-1 * G * u
GetNodeDisplacement(rCond, Displ);
noalias(delta_lambdas) = (1.00/(current_delta_time)) * prod(InvAux, Vector( prod(Constraint_Matrix, Displ) ) );
lamda_old = lambdas[0];
for(unsigned int i = 0; i<lambdas.size(); i++)
lambdas[i] = lambdas[i] + delta_lambdas[i];
// multipliers must stay non-positive; undo the increment otherwise
if (lambdas[0] > 0.00)
{
lambdas[0] = 0.00;
delta_lambdas[0] = -lamda_old;
}
if(lambdas[0] < 1E-6)
CalculateContactDisplacement(rCond, delta_lambdas, Constraint, InvMass);
}
/// Accumulates this condition's multiplier norms into the convergence
/// terms (Left += ||delta_lambda||, Rigth += ||lambda||) and resets the
/// increment for the next sweep. (Name typo is kept: callers use it.)
void CkeckConvergence(const ConditionsPointerType& rCond, double& Rigth_Term,
double& Left_Term)
{
Vector& delta_lambdas = (rCond)->GetValue(DELTA_LAMBDAS);
Vector& lambdas = (rCond)->GetValue(LAMBDAS);
Left_Term += norm_2(delta_lambdas);
Rigth_Term += norm_2(lambdas);
unsigned int size_delta_lambdas = delta_lambdas.size();
noalias(delta_lambdas) = ZeroVector(size_delta_lambdas);
}
/// True when the increment norm (Left_Term) is within the relative
/// tolerance EPS of the multiplier norm (Rigth_Term).
bool IsConverged(const double& EPS, const double& Rigth_Term, const double& Left_Term)
{
const double threshold = EPS * Rigth_Term;
return Left_Term <= threshold;
}
/// Inverts a diagonal matrix by reciprocating each diagonal entry into
/// rResult (off-diagonal entries are zero). Assumes no zero diagonal
/// entries — see CheckMatrix for the guard used elsewhere.
void InvertDiagonalMatrix(const Matrix& rMatrix ,Matrix& rResult)
{
const unsigned int entries = rMatrix.size1();
rResult.resize(entries, entries, false);
rResult = ZeroMatrix(entries, entries);
for (unsigned int row = 0; row < entries; ++row)
rResult(row, row) = 1.00 / rMatrix(row, row);
return;
}
/// Replaces near-zero diagonal entries (|d| < 1e-10) with 1e-14 so a later
/// diagonal inversion cannot divide by zero.
void CheckMatrix(Matrix& rMatrix)
{
const double toler = 1.0E-10;
for (unsigned int row = 0; row < rMatrix.size1(); ++row)
{
if (std::fabs(rMatrix(row, row)) < toler)
rMatrix(row, row) = 1E-14;
}
return;
}
/// Fills Displ with the current nodal positions (reference coordinate plus
/// displacement) of the condition's geometry, packed per node as (x,y) in
/// 2D or (x,y,z) in 3D.
void GetNodeDisplacement(const ConditionsPointerType& rCond, Vector& Displ)
{
Condition::GeometryType& geom = rCond->GetGeometry();
const unsigned int dimension = geom.WorkingSpaceDimension();
const unsigned int dim2 = geom.size()*dimension;
Displ.resize(dim2, false);
noalias(Displ) = ZeroVector(dim2);
unsigned int count = 0;
if(dimension==2)
{
for(unsigned int i = 0; i<geom.size(); i++)
{
Displ[count] = geom[i].X0() + geom[i].GetSolutionStepValue(DISPLACEMENT_X); // geom[i].X(); // actual_displacement[0];
Displ[count+1] = geom[i].Y0() + geom[i].GetSolutionStepValue(DISPLACEMENT_Y); // geom[i].Y(); // actual_displacement[1];
count += 2;
}
}
else
{
for(unsigned int i = 0; i<geom.size(); i++)
{
Displ[count] = geom[i].X0() + geom[i].GetSolutionStepValue(DISPLACEMENT_X); // geom[i].X(); // actual_displacement[0];
Displ[count+1] = geom[i].Y0() + geom[i].GetSolutionStepValue(DISPLACEMENT_Y); // geom[i].Y(); // actual_displacement[1];
Displ[count+2] = geom[i].Z0() + geom[i].GetSolutionStepValue(DISPLACEMENT_Z);
count +=3;
}
}
return;
}
/// Computes the displacement increment produced by the contact.
/// Accumulates the contact displacement correction
/// Displ = -dt * delta_lambda[0] * M^-1 * G on the condition's nodes,
/// stored in the DISPLACEMENT_OLD variable (used here as the contact
/// correction accumulator). Node locks guard against concurrent updates
/// from neighbouring conditions in the parallel sweep.
void CalculateContactDisplacement( const ConditionsPointerType& rCond,
const Vector& delta_lambdas,
const Vector& Constraint,
const Matrix& InvMass)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
Condition::GeometryType& geom = rCond->GetGeometry();
const unsigned int dimension = geom.WorkingSpaceDimension();
const unsigned int dim2 = geom.size()*dimension;
const double current_delta_time = CurrentProcessInfo[DELTA_TIME];
Vector Displ;
Displ.resize(dim2, false);
noalias(Displ) = ZeroVector(dim2);
noalias(Displ) = -current_delta_time * delta_lambdas[0] * prod(InvMass, Constraint);
unsigned int count = 0;
if(dimension==2)
{
for(unsigned int i = 0; i<geom.size(); i++)
{
geom[i].SetLock();
array_1d<double,3>& Contact_Displ = geom[i].FastGetSolutionStepValue(DISPLACEMENT_OLD); ///CONTACT DISPLACEMENT
Contact_Displ[0] = Contact_Displ[0] + Displ[0+count];
Contact_Displ[1] = Contact_Displ[1] + Displ[1+count];
Contact_Displ[2] = 0.00;
count = count + 2;
geom[i].UnSetLock();
}
}
else
{
for(unsigned int i = 0; i<geom.size(); i++)
{
geom[i].SetLock();
array_1d<double,3>& Contact_Displ = geom[i].FastGetSolutionStepValue(DISPLACEMENT_OLD); /// CONTACT DISPLACEMENT
Contact_Displ[0] = Contact_Displ[0] + Displ[0+count];
Contact_Displ[1] = Contact_Displ[1] + Displ[1+count];
Contact_Displ[2] = Contact_Displ[2] + Displ[2+count];
count = count + 3;
geom[i].UnSetLock();
}
}
return;
}
/// Fills Constraint with the condition's CONSTRAINT_VECTOR, sized
/// (number of nodes) x (spatial dimension) and zero-initialized first.
void ComputeConstraintVector(const ConditionsPointerType& rCond, Vector& Constraint)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
Condition::GeometryType& geom = rCond->GetGeometry();
const unsigned int vector_size = geom.size() * geom.WorkingSpaceDimension();
Constraint.resize(vector_size, false);
noalias(Constraint) = ZeroVector(vector_size);
rCond->Calculate(CONSTRAINT_VECTOR, Constraint, CurrentProcessInfo);
}
/// Adds each node's accumulated contact correction (DISPLACEMENT_OLD) to
/// its DISPLACEMENT — skipping fixed DOFs — then clears the accumulator.
/// (Name typo is kept: callers use it.)
void UpadateDisplacement()
{
KRATOS_TRY
for(ModelPart::NodeIterator i = mr_model_part.NodesBegin() ;
i != mr_model_part.NodesEnd() ; ++i)
{
array_1d<double,3>& Contact_Displ = i->FastGetSolutionStepValue(DISPLACEMENT_OLD);
array_1d<double,3>& actual_displacement = i->FastGetSolutionStepValue(DISPLACEMENT);
if( (i->pGetDof(DISPLACEMENT_X))->IsFixed() == false )
{
actual_displacement[0] = actual_displacement[0] + Contact_Displ[0];
}
if( (i->pGetDof(DISPLACEMENT_Y))->IsFixed() == false )
{
actual_displacement[1] = actual_displacement[1] + Contact_Displ[1];
}
if (mrdimension==3)
{
if( (i->pGetDof(DISPLACEMENT_Z))->IsFixed() == false )
{
actual_displacement[2] = actual_displacement[2] + Contact_Displ[2];
}
}
// reset the accumulator for the next Jacobi sweep
Contact_Displ = ZeroVector(3);
}
KRATOS_CATCH("")
}
/// WARNING = To be parallel
void MoveMeshAgain()
{
KRATOS_TRY
for(ModelPart::NodeIterator i = mr_model_part.NodesBegin() ;
i != mr_model_part.NodesEnd() ; ++i)
{
//array_1d<double,3>& actual_displacement = i->FastGetSolutionStepValue(DISPLACEMENT);
if( (i->pGetDof(DISPLACEMENT_X))->IsFixed() == false )
{
(i)->X() = (i)->X0() + i->GetSolutionStepValue(DISPLACEMENT_X);
}
if( (i->pGetDof(DISPLACEMENT_Y))->IsFixed() == false )
{
(i)->Y() = (i)->Y0() + i->GetSolutionStepValue(DISPLACEMENT_Y);
}
if (mrdimension==3)
{
if( (i->pGetDof(DISPLACEMENT_Z))->IsFixed() == false )
{
(i)->Z() = (i)->Z0() + i->GetSolutionStepValue(DISPLACEMENT_Z);
}
}
}
KRATOS_CATCH("")
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string (the class name, padded with spaces).
virtual std::string Info() const
{
    return " ForwardIncrementLagrangeMultiplierScheme ";
}
/// Print information about this object. Intentionally a no-op.
virtual void PrintInfo(std::ostream& rOStream) const {}
/// Print object's data. Intentionally a no-op.
virtual void PrintData(std::ostream& rOStream) const {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
// Model part this scheme operates on.
// NOTE(review): named like a reference ("mr_") but stored by value, which
// would copy the model part at construction — confirm a ModelPart& was
// intended (constructor is outside this view).
ModelPart mr_model_part;
// Working-space dimension of the problem; Z components are only updated
// when this equals 3 (see UpadateDisplacement / MoveMeshAgain).
unsigned int mrdimension;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Splits the half-open row range [0, number_of_rows) into
/// number_of_threads contiguous chunks: thread i owns rows
/// [partitions[i], partitions[i+1]). The remainder of the integer
/// division is absorbed by the last chunk, as before.
/// @param number_of_threads  number of chunks; treated as 1 if 0 to avoid
///                           the division by zero the original code had
/// @param number_of_rows     total number of rows to distribute
/// @param partitions         output, resized to number_of_threads + 1
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
    if (number_of_threads == 0)
    {
        number_of_threads = 1; // guard: division by zero below otherwise
    }
    partitions.resize(number_of_threads + 1);
    const int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for (unsigned int i = 1; i < number_of_threads; i++)
    {
        partitions[i] = partitions[i-1] + partition_size;
    }
}
/*
/// Assignment operator.
ForwardIncrementLagrangeMultiplierScheme& operator=(ForwardIncrementLagrangeMultiplierScheme const& rOther){}
/// Copy constructor.
ForwardIncrementLagrangeMultiplierScheme(ForwardIncrementLagrangeMultiplierScheme const& rOther){}
*/
///@}
}; // Class ForwardIncrementLagrangeMultiplierScheme
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/*
/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
ForwardIncrementLagrangeMultiplierScheme& rThis);
/// output stream function
inline std::ostream& operator << (std::ostream& rOStream,
const ForwardIncrementLagrangeMultiplierScheme& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
*/
} // namespace Kratos.
#endif // FORWARD_INCREMENT_LAGRANGE_MULTIPLIER_SCHEME defined
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.