source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
space_to_depth.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_SPACE_TO_DEPTH_H_
#define MACE_KERNELS_SPACE_TO_DEPTH_H_
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/public/mace.h"
#include "mace/kernels/kernel.h"
namespace mace {
namespace kernels {
// SpaceToDepth (CPU reference implementation): rearranges spatial blocks of
// the input into the depth dimension. Layout is NCHW: input
// (batch, depth, H, W) maps to output
// (batch, depth * block_size^2, H / block_size, W / block_size).
template<DeviceType D, typename T>
struct SpaceToDepthOpFunctor : OpKernel {
SpaceToDepthOpFunctor(OpKernelContext *context,
const int block_size)
: OpKernel(context), block_size_(block_size) {}
// Computes output = space_to_depth(input).
// input:  NCHW tensor; H and W must be divisible by block_size_ (checked).
// output: resized here to {batch, depth*bs*bs, H/bs, W/bs}.
// future: unused on CPU (execution is synchronous).
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future) {
MACE_UNUSED(future);
const index_t batch_size = input->dim(0);
const index_t input_depth = input->dim(1);
const index_t input_height = input->dim(2);
const index_t input_width = input->dim(3);
MACE_CHECK(
(input_width % block_size_ == 0) && (input_height % block_size_ == 0),
"input width and height should be dividable by block_size");
const index_t output_depth = input_depth * block_size_ * block_size_;
const index_t output_width = input_width / block_size_;
const index_t output_height = input_height / block_size_;
std::vector<index_t> output_shape = {batch_size, output_depth,
output_height, output_width};
MACE_RETURN_IF_ERROR(output->Resize(output_shape));
// Map tensors for host access before taking raw pointers.
Tensor::MappingGuard logits_guard(input);
Tensor::MappingGuard output_guard(output);
const T *input_ptr = input->data<T>();
T *output_ptr = output->mutable_data<T>();
// Parallel over the batch dimension only; each (b,d,h,w) writes a
// distinct output element, so iterations are independent.
#pragma omp parallel for
for (index_t b = 0; b < batch_size; ++b) {
for (index_t d = 0; d < input_depth; ++d) {
for (index_t h = 0; h < input_height; ++h) {
const index_t out_h = h / block_size_;
const index_t offset_h = (h % block_size_);
for (index_t w = 0; w < input_width; ++w) {
const index_t out_w = w / block_size_;
const index_t offset_w = (w % block_size_);
// The (offset_h, offset_w) position inside the block selects the
// destination depth slice; slices are strided by input_depth.
const index_t offset_d =
(offset_h * block_size_ + offset_w) * input_depth;
const index_t out_d = d + offset_d;
// Row-major NCHW flattening of output and input coordinates.
const index_t o_index =
((b * output_depth + out_d) * output_height + out_h)
* output_width + out_w;
const index_t i_index =
((b * input_depth + d) * input_height + h) * input_width + w;
output_ptr[o_index] = input_ptr[i_index];
}
}
}
}
return MACE_SUCCESS;
}
// Side length of the square spatial block folded into depth.
const int block_size_;
};
#ifdef MACE_ENABLE_OPENCL
// Abstract interface implemented by the OpenCL kernels that perform
// SpaceToDepth on the GPU (implementations live outside this header).
class OpenCLSpaceToDepthKernel {
public:
virtual MaceStatus Compute(
OpKernelContext *context,
const Tensor *input,
Tensor *output,
StatsFuture *future) = 0;
MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLSpaceToDepthKernel);
};
// GPU specialization: declaration only — the constructor and operator()
// are defined in the corresponding .cc file and forward to kernel_.
template<typename T>
struct SpaceToDepthOpFunctor<DeviceType::GPU, T> : OpKernel {
explicit SpaceToDepthOpFunctor(OpKernelContext *context,
const int block_size);
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future);
std::unique_ptr<OpenCLSpaceToDepthKernel> kernel_;
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_SPACE_TO_DEPTH_H_
|
GB_binop__lor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lor_int64
// A.*B function (eWiseMult): GB_AemultB__lor_int64
// A*D function (colscale): GB_AxD__lor_int64
// D*A function (rowscale): GB_DxB__lor_int64
// C+=B function (dense accum): GB_Cdense_accumB__lor_int64
// C+=b function (dense accum): GB_Cdense_accumb__lor_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_int64
// C=scalar+B GB_bind1st__lor_int64
// C=scalar+B' GB_bind1st_tran__lor_int64
// C=A+scalar GB_bind2nd__lor_int64
// C=A'+scalar GB_bind2nd_tran__lor_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) || (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT64 || GxB_NO_LOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, all three matrices dense (auto-generated kernel; the loop lives
// in the included template, specialized through the GB_* macros above).
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// telling the caller to fall back to the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__lor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, parallelized via
// the precomputed task slices (kfirst/klast/pstart) over ntasks/nthreads.
// GrB_NO_VALUE means the operator is compiled out; use the generic case.
GrB_Info GB_Cdense_accumB__lor_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB_Cdense_accumb__lor_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns first.
// Harmless artifact of the code generator; do not hand-edit this file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D. The *_is_pattern flags
// tell the template whether values of A/D are used or only their pattern.
GrB_Info GB_AxD__lor_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D (template-expanded).
GrB_Info GB_DxB__lor_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the LOR operator on int64 inputs.
// C_to_M / C_to_A / C_to_B map vectors of C to vectors of M, A, B; the
// TaskList partitions the work across ntasks/nthreads.
GrB_Info GB_AaddB__lor_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the LOR operator (template body).
GrB_Info GB_AemultB__lor_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x || Bx [p]) for all p: apply the LOR operator with the scalar
// bound as the first argument. anz is the number of entries; the loop is
// statically scheduled across nthreads.
GrB_Info GB_bind1st__lor_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t bij = Bx [p] ;
// result is 0 or 1, stored as int64_t
Cx [p] = ((x != 0) || (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] || y) for all p: apply the LOR operator with the scalar
// bound as the second argument (mirror of GB_bind1st__lor_int64).
GrB_Info GB_bind2nd__lor_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
// result is 0 or 1, stored as int64_t
Cx [p] = ((aij != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LOR with the scalar bound first.
// The element-wise work is done by GB_CAST_OP (defined just above) inside
// the included transpose template.
GrB_Info GB_bind1st_tran__lor_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LOR with the scalar bound second;
// element-wise work comes from the GB_CAST_OP defined just above.
GrB_Info GB_bind2nd_tran__lor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pair_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_bool)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_bool)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = 1
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_BOOL || GxB_NO_PAIR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, all three matrices dense, PAIR operator on bool (cij = 1).
// Auto-generated: the loop is in the included template; GrB_NO_VALUE means
// the operator is compiled out and the generic kernel should be used.
GrB_Info GB (_Cdense_ewise3_noaccum__pair_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, parallelized via
// the B_ek_slicing task decomposition (B_ntasks tasks, B_nthreads threads).
GrB_Info GB (_Cdense_accumB__pair_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__pair_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns first.
// Harmless artifact of the code generator; do not hand-edit this file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the PAIR operator on bool.
// The GB_WERK workspaces are declared here and released by GB_FREE_WORK
// after the included template has run.
GrB_Info GB (_AaddB__pair_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
common.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_UTILS_COMMON_H_
#define LIGHTGBM_UTILS_COMMON_H_
#if ((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__)))
#include <LightGBM/utils/common_legacy_solaris.h>
#endif
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <limits>
#include <string>
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iomanip>
#include <iterator>
#include <map>
#include <memory>
#include <sstream>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include <map>
#if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))
#define FMT_HEADER_ONLY
#include "../../../external_libs/fmt/include/fmt/format.h"
#endif
#include "../../../external_libs/fast_double_parser/include/fast_double_parser.h"
#ifdef _MSC_VER
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
#endif
#if defined(_MSC_VER)
#include <malloc.h>
#elif MM_MALLOC
#include <mm_malloc.h>
// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
// https://www.oreilly.com/library/view/mac-os-x/0596003560/ch05s01s02.html
#elif defined(__GNUC__) && defined(HAVE_MALLOC_H)
#include <malloc.h>
#define _mm_malloc(a, b) memalign(b, a)
#define _mm_free(a) free(a)
#else
#include <stdlib.h>
#define _mm_malloc(a, b) malloc(a)
#define _mm_free(a) free(a)
#endif
namespace LightGBM {
namespace Common {
using json11::Json;
/*!
 * Applies the classic "C" locale to the stream so its numeric formatting
 * does not depend on the process-wide locale.
 */
static void C_stringstream(std::stringstream &ss) {
  const std::locale c_locale = std::locale::classic();
  ss.imbue(c_locale);
}
/*!
 * ASCII-only lower-casing: maps 'A'..'Z' to 'a'..'z' and returns every
 * other byte unchanged (locale-independent, unlike std::tolower).
 */
inline static char tolower(char in) {
  const bool is_upper = (in >= 'A') && (in <= 'Z');
  return is_upper ? static_cast<char>(in + ('a' - 'A')) : in;
}
/*!
 * Returns a copy of str with leading and trailing ASCII whitespace
 * (" \f\n\r\t\v") removed.
 */
inline static std::string Trim(std::string str) {
  if (!str.empty()) {
    const char* kWhitespace = " \f\n\r\t\v";
    str.erase(str.find_last_not_of(kWhitespace) + 1);   // drop the tail
    str.erase(0, str.find_first_not_of(kWhitespace));   // drop the head
  }
  return str;
}
/*!
 * Returns a copy of str with any run of single or double quotes stripped
 * from both ends (interior quotes are kept).
 */
inline static std::string RemoveQuotationSymbol(std::string str) {
  if (!str.empty()) {
    const char* kQuotes = "'\"";
    str.erase(str.find_last_not_of(kQuotes) + 1);   // trailing quotes
    str.erase(0, str.find_first_not_of(kQuotes));   // leading quotes
  }
  return str;
}
/*!
 * Checks whether str begins with prefix; an empty prefix matches anything.
 * \param str    string to inspect
 * \param prefix prefix to look for — now taken by const reference (the old
 *               version copied it on every call) and compared in place via
 *               compare(), avoiding the temporary that substr() allocated
 * \return true if str starts with prefix
 */
inline static bool StartsWith(const std::string& str, const std::string& prefix) {
  return str.size() >= prefix.size() &&
         str.compare(0, prefix.size(), prefix) == 0;
}
/*!
 * Splits c_str on the single character delimiter.
 * Consecutive delimiters produce no empty tokens.
 */
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
  std::vector<std::string> tokens;
  const std::string text(c_str);
  size_t token_begin = 0;
  for (size_t cursor = 0; cursor < text.length(); ++cursor) {
    if (text[cursor] == delimiter) {
      if (token_begin < cursor) {
        tokens.push_back(text.substr(token_begin, cursor - token_begin));
      }
      token_begin = cursor + 1;
    }
  }
  // last token, if the string did not end with a delimiter
  if (token_begin < text.length()) {
    tokens.push_back(text.substr(token_begin));
  }
  return tokens;
}
/*!
 * Extracts the substrings enclosed between left_delimiter and
 * right_delimiter, e.g. "[a][bc]" -> {"a", "bc"}.
 * Empty pairs are skipped; a right delimiter seen before any left
 * delimiter is treated as ordinary text.
 */
inline static std::vector<std::string> SplitBrackets(const char* c_str, char left_delimiter, char right_delimiter) {
  std::vector<std::string> pieces;
  const std::string text(c_str);
  size_t content_begin = 0;
  bool inside = false;  // true once a left delimiter has been seen
  for (size_t cursor = 0; cursor < text.length(); ++cursor) {
    const char ch = text[cursor];
    if (ch == left_delimiter) {
      // a new left delimiter always restarts the captured region
      inside = true;
      content_begin = cursor + 1;
    } else if (inside && ch == right_delimiter) {
      if (content_begin < cursor) {
        pieces.push_back(text.substr(content_begin, cursor - content_begin));
      }
      inside = false;
    }
  }
  return pieces;
}
/*!
 * Splits c_str into lines, accepting '\n', '\r', or "\r\n" endings.
 * Blank lines are dropped.
 */
inline static std::vector<std::string> SplitLines(const char* c_str) {
  std::vector<std::string> lines;
  const std::string text(c_str);
  size_t line_begin = 0;
  size_t cursor = 0;
  while (cursor < text.length()) {
    const char ch = text[cursor];
    if (ch == '\n' || ch == '\r') {
      if (line_begin < cursor) {
        lines.push_back(text.substr(line_begin, cursor - line_begin));
      }
      // consume the whole run of line-ending characters
      while (cursor < text.length() &&
             (text[cursor] == '\n' || text[cursor] == '\r')) {
        ++cursor;
      }
      line_begin = cursor;
    } else {
      ++cursor;
    }
  }
  // final line without a trailing newline
  if (line_begin < cursor) {
    lines.push_back(text.substr(line_begin));
  }
  return lines;
}
/*!
 * Splits c_str on any character in the NUL-terminated delimiters set.
 * Consecutive delimiters produce no empty tokens.
 */
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
  std::vector<std::string> tokens;
  const std::string text(c_str);
  // true when ch is one of the delimiter characters
  const auto is_delimiter = [delimiters](char ch) {
    for (const char* d = delimiters; *d != '\0'; ++d) {
      if (ch == *d) {
        return true;
      }
    }
    return false;
  };
  size_t token_begin = 0;
  for (size_t cursor = 0; cursor < text.length(); ++cursor) {
    if (is_delimiter(text[cursor])) {
      if (token_begin < cursor) {
        tokens.push_back(text.substr(token_begin, cursor - token_begin));
      }
      token_begin = cursor + 1;
    }
  }
  // last token, if the string did not end with a delimiter
  if (token_begin < text.length()) {
    tokens.push_back(text.substr(token_begin));
  }
  return tokens;
}
// Reads the string value stored under `key` in a JSON-formatted parser
// config. Aborts via Log::Fatal if config_str is not valid JSON.
// NOTE(review): for a missing key this presumably yields an empty string
// (json11 null Json::string_value()) — verify against the json11 docs.
inline static std::string GetFromParserConfig(std::string config_str, std::string key) {
// parser config should follow json format.
std::string err;
Json config_json = Json::parse(config_str, &err);
if (!err.empty()) {
Log::Fatal("Invalid parser config: %s. Please check if follow json format.", err.c_str());
}
return config_json[key].string_value();
}
// Adds (key, value) to a JSON-formatted parser config and returns the
// re-serialized JSON string. Aborts via Log::Fatal on invalid JSON input.
// NOTE(review): std::map::insert does NOT overwrite — if `key` already
// exists the old value is kept. Confirm this no-overwrite behavior is
// intended by callers; insert_or_assign would replace it instead.
inline static std::string SaveToParserConfig(std::string config_str, std::string key, std::string value) {
std::string err;
Json config_json = Json::parse(config_str, &err);
if (!err.empty()) {
Log::Fatal("Invalid parser config: %s. Please check if follow json format.", err.c_str());
}
CHECK(config_json.is_object());
std::map<std::string, Json> config_map = config_json.object_items();
config_map.insert(std::pair<std::string, Json>(key, Json(value)));
return Json(config_map).dump();
}
/*!
 * Parses an optionally sign-prefixed decimal integer into *out, skipping
 * spaces before and after the number. No digits yields *out == 0.
 * \return pointer to the first character past the number and any
 *         trailing spaces
 */
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
  // leading spaces
  while (*p == ' ') {
    ++p;
  }
  // optional sign
  int sign = 1;
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  // digit run
  T value = 0;
  while (*p >= '0' && *p <= '9') {
    value = value * 10 + (*p - '0');
    ++p;
  }
  *out = static_cast<T>(sign * value);
  // trailing spaces
  while (*p == ' ') {
    ++p;
  }
  return p;
}
// Computes base^power as a double via recursive exponentiation by
// squaring/cubing; negative powers return the reciprocal.
// The exact recursion order affects floating-point rounding, so the structure
// must not be re-associated without checking reproducibility.
template<typename T>
inline static double Pow(T base, int power) {
  if (power < 0) {
    return 1.0 / Pow(base, -power);
  } else if (power == 0) {
    return 1;
  } else if (power % 2 == 0) {
    return Pow(base*base, power / 2);
  } else if (power % 3 == 0) {
    return Pow(base*base*base, power / 3);
  } else {
    return base * Pow(base, power - 1);
  }
}
// Hand-rolled, locale-independent double parser. Consumes leading/trailing
// spaces; recognizes an optional sign, digits, a fractional part and a decimal
// exponent, plus the tokens na/nan/null (-> NAN) and inf/infinity
// (-> sign * 1e308). Any other token aborts via Log::Fatal.
// Returns a pointer just past the parsed token.
// NOTE: less precise than AtofPrecise below; kept for legacy bit-for-bit behavior.
inline static const char* Atof(const char* p, double* out) {
  int frac;
  double sign, value, scale;
  *out = NAN;
  // Skip leading white space, if any.
  while (*p == ' ') {
    ++p;
  }
  // Get sign, if any.
  sign = 1.0;
  if (*p == '-') {
    sign = -1.0;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  // is a number
  if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
    // Get digits before decimal point or exponent, if any.
    for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
      value = value * 10.0 + (*p - '0');
    }
    // Get digits after decimal point, if any.
    if (*p == '.') {
      double right = 0.0;
      int nn = 0;
      ++p;
      while (*p >= '0' && *p <= '9') {
        right = (*p - '0') + right * 10.0;
        ++nn;
        ++p;
      }
      value += right / Pow(10.0, nn);
    }
    // Handle exponent, if any.
    frac = 0;
    scale = 1.0;
    if ((*p == 'e') || (*p == 'E')) {
      uint32_t expon;
      // Get sign of exponent, if any.
      ++p;
      if (*p == '-') {
        frac = 1;
        ++p;
      } else if (*p == '+') {
        ++p;
      }
      // Get digits of exponent, if any.
      for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
        expon = expon * 10 + (*p - '0');
      }
      // clamp to the double range; 10^308 is the largest power applied
      if (expon > 308) expon = 308;
      // Calculate scaling factor.
      while (expon >= 50) { scale *= 1E50; expon -= 50; }
      while (expon >= 8) { scale *= 1E8; expon -= 8; }
      while (expon > 0) { scale *= 10.0; expon -= 1; }
    }
    // Return signed and scaled floating point result.
    *out = sign * (frac ? (value / scale) : (value * scale));
  } else {
    // not numeric: collect the token up to the next field separator
    size_t cnt = 0;
    while (*(p + cnt) != '\0' && *(p + cnt) != ' '
      && *(p + cnt) != '\t' && *(p + cnt) != ','
      && *(p + cnt) != '\n' && *(p + cnt) != '\r'
      && *(p + cnt) != ':') {
      ++cnt;
    }
    if (cnt > 0) {
      std::string tmp_str(p, cnt);
      std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
      if (tmp_str == std::string("na") || tmp_str == std::string("nan") ||
          tmp_str == std::string("null")) {
        *out = NAN;
      } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
        *out = sign * 1e308;
      } else {
        Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
      }
      p += cnt;
    }
  }
  while (*p == ' ') {
    ++p;
  }
  return p;
}
// Use fast_double_parse and strtod (if parse failed) to parse double.
// Aborts if nothing can be parsed; warns (but keeps the strtod result) on
// under/overflow. Returns a pointer just past the parsed number.
inline static const char* AtofPrecise(const char* p, double* out) {
  const char* end = fast_double_parser::parse_number(p, out);
  if (end != nullptr) {
    return end;
  }
  // Rare path: Not in RFC 7159 format. Possible "inf", "nan", etc. Fallback to standard library:
  char* end2;
  errno = 0;  // This is Required before calling strtod.
  *out = std::strtod(p, &end2);  // strtod is locale aware.
  if (end2 == p) {
    Log::Fatal("no conversion to double for: %s", p);
  }
  if (errno == ERANGE) {
    Log::Warning("convert to double got underflow or overflow: %s", p);
  }
  return end2;
}
// Parses an int from p and reports whether the entire string was consumed.
inline static bool AtoiAndCheck(const char* p, int* out) {
  const char* rest = Atoi(p, out);
  return *rest == '\0';
}
// Parses a double from p and reports whether the entire string was consumed.
inline static bool AtofAndCheck(const char* p, double* out) {
  const char* rest = Atof(p, out);
  return *rest == '\0';
}
// Advances past any run of spaces and tabs.
inline static const char* SkipSpaceAndTab(const char* p) {
  for (; *p == ' ' || *p == '\t'; ++p) {
  }
  return p;
}
// Advances past any run of newlines, carriage returns and spaces.
inline static const char* SkipReturn(const char* p) {
  for (; *p == '\n' || *p == '\r' || *p == ' '; ++p) {
  }
  return p;
}
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
std::vector<T2> ret(arr.size());
for (size_t i = 0; i < arr.size(); ++i) {
ret[i] = static_cast<T2>(arr[i]);
}
return ret;
}
// String-to-T conversion helper: primary template handles the integral case
// via Common::Atoi (selected when is_float == false).
template<typename T, bool is_float>
struct __StringToTHelper {
  T operator()(const std::string& str) const {
    T ret = 0;
    Atoi(str.c_str(), &ret);
    return ret;
  }
};
// Floating-point specialization: parses with std::stod.
// NOTE(review): std::stod is locale-sensitive; the locale-independent variants
// live in namespace CommonC further down -- confirm which one a caller needs.
template<typename T>
struct __StringToTHelper<T, true> {
  T operator()(const std::string& str) const {
    return static_cast<T>(std::stod(str));
  }
};
// Splits `str` on `delimiter` and parses each (non-empty) token as T.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
  std::vector<std::string> strs = Split(str.c_str(), delimiter);
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}
// Splits `str` into bracketed groups (via SplitBrackets) and parses each
// group into its own vector.
template<typename T>
inline static std::vector<std::vector<T>> StringToArrayofArrays(
    const std::string& str, char left_bracket, char right_bracket, char delimiter) {
  std::vector<std::string> strs = SplitBrackets(str.c_str(), left_bracket, right_bracket);
  std::vector<std::vector<T>> ret;
  for (const auto& s : strs) {
    ret.push_back(StringToArray<T>(s, delimiter));
  }
  return ret;
}
// Splits `str` on spaces and parses exactly n tokens as T; aborts (CHECK_EQ)
// if the token count differs from n.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  std::vector<std::string> strs = Split(str.c_str(), ' ');
  CHECK_EQ(strs.size(), static_cast<size_t>(n));
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}
// Pointer-based conversion helper (no intermediate std::string):
// integral case via Common::Atoi.
template<typename T, bool is_float>
struct __StringToTHelperFast {
  const char* operator()(const char*p, T* out) const {
    return Atoi(p, out);
  }
};
// Floating-point case via Common::Atof -- less precise than std::stod above;
// kept for legacy bit-for-bit behavior.
template<typename T>
struct __StringToTHelperFast<T, true> {
  const char* operator()(const char*p, T* out) const {
    double tmp = 0.0f;
    auto ret = Atof(p, &tmp);
    *out = static_cast<T>(tmp);
    return ret;
  }
};
// Parses exactly n whitespace-separated values directly from the character
// buffer (faster than StringToArray, but see the precision note above).
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  auto p_str = str.c_str();
  __StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
  std::vector<T> ret(n);
  for (int i = 0; i < n; ++i) {
    p_str = helper(p_str, &ret[i]);
  }
  return ret;
}
// Joins all elements of `strs` with `delimiter`; doubles are streamed with
// enough digits to round-trip. With force_C_locale the stream is switched to
// the "C" locale first.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter, const bool force_C_locale = false) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream buf;
  if (force_C_locale) {
    C_stringstream(buf);
  }
  buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  bool first = true;
  for (const auto& item : strs) {
    if (!first) {
      buf << delimiter;
    }
    buf << item;
    first = false;
  }
  return buf.str();
}
// Specialization for int8_t: stream each element as int16_t so values print
// numerically instead of as raw characters.
template<>
inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter, const bool force_C_locale) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream buf;
  if (force_C_locale) {
    C_stringstream(buf);
  }
  buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  buf << static_cast<int16_t>(strs[0]);
  for (size_t idx = 1; idx < strs.size(); ++idx) {
    buf << delimiter << static_cast<int16_t>(strs[idx]);
  }
  return buf.str();
}
// Joins strs[start, end) with `delimiter`; out-of-range indices are clamped to
// the vector size. Returns "" for an empty vector or an empty/inverted range.
//
// Fix: the original emptiness test `end - start <= 0` used unsigned
// subtraction, so an inverted range (end < start) wrapped around and slipped
// through (printing a single wrong element), and an empty `strs` made
// `strs.size() - 1` wrap as well. Both cases are rejected explicitly now.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter, const bool force_C_locale = false) {
  if (strs.empty() || start >= end) {
    return std::string("");
  }
  start = std::min(start, static_cast<size_t>(strs.size()) - 1);
  end = std::min(end, static_cast<size_t>(strs.size()));
  if (start >= end) {  // defensive: range fell entirely past the data
    return std::string("");
  }
  std::stringstream str_buf;
  if (force_C_locale) {
    C_stringstream(str_buf);
  }
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[start];
  for (size_t i = start + 1; i < end; ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}
// Returns the smallest power of two >= x (returns 1 for x <= 1),
// or 0 if x exceeds the largest int64_t power of two (2^62).
//
// Fix: the original loop shifted `t` all the way to 2^63, which is signed
// overflow (undefined behavior) for int64_t; the shift is now guarded.
inline static int64_t Pow2RoundUp(int64_t x) {
  int64_t t = 1;
  while (t < x) {
    if (t > std::numeric_limits<int64_t>::max() / 2) {
      return 0;  // x cannot be rounded up within int64_t
    }
    t <<= 1;
  }
  return t;
}
/*!
 * \brief Do inplace softmax transformation on p_rec
 *        (each element becomes exp(x - max) normalized to sum 1;
 *        subtracting the max keeps exp() from overflowing).
 * \param p_rec The input/output vector of the values.
 *
 * Fix: guard against an empty vector -- the original read rec[0]
 * unconditionally, which is undefined behavior on empty input.
 */
inline static void Softmax(std::vector<double>* p_rec) {
  std::vector<double> &rec = *p_rec;
  if (rec.empty()) {
    return;
  }
  double wmax = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    wmax = std::max(rec[i], wmax);
  }
  double wsum = 0.0f;
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] = std::exp(rec[i] - wmax);
    wsum += rec[i];
  }
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] /= static_cast<double>(wsum);
  }
}
// Out-of-place softmax: output[i] = exp(input[i] - max) / sum, for len values.
// Subtracting the max keeps exp() from overflowing.
// Fix: guard len <= 0 -- the original read input[0] unconditionally (UB).
inline static void Softmax(const double* input, double* output, int len) {
  if (len <= 0) {
    return;
  }
  double wmax = input[0];
  for (int i = 1; i < len; ++i) {
    wmax = std::max(input[i], wmax);
  }
  double wsum = 0.0f;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - wmax);
    wsum += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= static_cast<double>(wsum);
  }
}
// Returns a vector of non-owning const raw pointers viewing the objects held
// by a vector of unique_ptr (the unique_ptrs keep ownership).
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> out;
  out.reserve(input.size());
  for (const auto& owned : input) {
    out.push_back(owned.get());
  }
  return out;
}
// Stable-sorts keys[start..] (ascending, or descending if is_reverse) and
// reorders values[start..] in lockstep. Elements before `start` are untouched.
//
// Fix: the original write-back loop iterated `i` from `start` to `arr.size()`
// and assigned `arr[i]`, but `arr` holds only (n - start) pairs indexed from 0,
// so for start > 0 it wrote wrong elements and stopped early. The write-back
// now offsets by `start` correctly (identical behavior for start == 0).
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
  auto& ref_key = *keys;
  auto& ref_value = *values;
  std::vector<std::pair<T1, T2>> arr;
  for (size_t i = start; i < keys->size(); ++i) {
    arr.emplace_back(ref_key[i], ref_value[i]);
  }
  if (!is_reverse) {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first < b.first;
    });
  } else {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first > b.first;
    });
  }
  for (size_t i = 0; i < arr.size(); ++i) {
    ref_key[start + i] = arr[i].first;
    ref_value[start + i] = arr[i].second;
  }
}
// Returns the .data() pointer of every inner vector (non-owning views;
// invalidated if the inner vectors reallocate).
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
  auto& rows = *data;
  std::vector<T*> out;
  out.reserve(rows.size());
  for (auto& row : rows) {
    out.push_back(row.data());
  }
  return out;
}
// Returns the size of each inner vector, cast to int.
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
  std::vector<int> sizes;
  sizes.reserve(data.size());
  for (const auto& row : data) {
    sizes.push_back(static_cast<int>(row.size()));
  }
  return sizes;
}
// Clamps doubles into [-1e300, 1e300]; NaN maps to 0.0.
inline static double AvoidInf(double x) {
  if (std::isnan(x)) return 0.0;
  if (x >= 1e300) return 1e300;
  if (x <= -1e300) return -1e300;
  return x;
}
// Clamps floats into [-1e38, 1e38]; NaN maps to 0.0f.
inline static float AvoidInf(float x) {
  if (std::isnan(x)) return 0.0f;
  if (x >= 1e38) return 1e38f;
  if (x <= -1e38) return -1e38f;
  return x;
}
// Returns a null pointer typed as the iterator's value_type -- used only to
// carry the value type into ParallelSort's template argument deduction.
template<typename _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
  return (0);
}
// Parallel merge sort over [_First, _Last): sorts up to `num_threads` chunks
// independently with OpenMP, then merges pairs of sorted runs in log(#chunks)
// passes. Falls back to std::sort for small inputs or a single thread.
// NOTE(review): `_RanIt buf = temp_buf.begin();` assigns a
// std::vector<_VTRanIt>::iterator to _RanIt, so this presumes callers pass
// vector iterators -- confirm against call sites.
template<typename _RanIt, typename _Pr, typename _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
  size_t len = _Last - _First;
  const size_t kMinInnerLen = 1024;  // below this, serial sort is cheaper
  int num_threads = OMP_NUM_THREADS();
  if (len <= kMinInnerLen || num_threads <= 1) {
    std::sort(_First, _Last, _Pred);
    return;
  }
  size_t inner_size = (len + num_threads - 1) / num_threads;
  inner_size = std::max(inner_size, kMinInnerLen);
  num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
  // Phase 1: sort each chunk independently.
  #pragma omp parallel for schedule(static, 1)
  for (int i = 0; i < num_threads; ++i) {
    size_t left = inner_size*i;
    size_t right = left + inner_size;
    right = std::min(right, len);
    if (right > left) {
      std::sort(_First + left, _First + right, _Pred);
    }
  }
  // Buffer for merge.
  std::vector<_VTRanIt> temp_buf(len);
  _RanIt buf = temp_buf.begin();
  size_t s = inner_size;  // current sorted-run length
  // Recursive merge
  while (s < len) {
    int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < loop_size; ++i) {
      size_t left = i * 2 * s;
      size_t mid = left + s;
      size_t right = mid + s;
      right = std::min(len, right);
      if (mid >= right) { continue; }
      // copy the left run aside, then merge it with the right run in place
      std::copy(_First + left, _First + mid, buf + left);
      std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
    }
    s *= 2;
  }
}
// Convenience overload deducing the value type from the iterator.
template<typename _RanIt, typename _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
  return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
// Elements are scanned pairwise: one comparison orders each pair, so each end
// needs a check against only one bound (halves the number of bound checks).
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
  // builds the error message and aborts via Log::Fatal
  auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
    std::ostringstream os;
    os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
    Log::Fatal(os.str().c_str(), callername, i);
  };
  for (int i = 1; i < ny; i += 2) {
    if (y[i - 1] < y[i]) {
      if (y[i - 1] < ymin) {
        fatal_msg(i - 1);
      } else if (y[i] > ymax) {
        fatal_msg(i);
      }
    } else {
      if (y[i - 1] > ymax) {
        fatal_msg(i - 1);
      } else if (y[i] < ymin) {
        fatal_msg(i);
      }
    }
  }
  if (ny & 1) {  // odd: the last element was not paired, check both bounds
    if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
      fatal_msg(ny - 1);
    }
  }
}
// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
// Elements are consumed in ordered pairs, so each element needs only one
// min/max comparison (~3n/2 total instead of 2n). The sum accumulates in
// array order, which matters for floating-point reproducibility.
// Any of mi/ma/su may be nullptr to skip that output. Assumes nw >= 1
// (w[0] is read unconditionally).
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
  T1 minw;
  T1 maxw;
  T1 sumw;
  int i;
  if (nw & 1) {  // odd: seed with w[0]; pairing starts at (w[1], w[2])
    minw = w[0];
    maxw = w[0];
    sumw = w[0];
    i = 2;
  } else {  // even: seed with the ordered pair (w[0], w[1])
    if (w[0] < w[1]) {
      minw = w[0];
      maxw = w[1];
    } else {
      minw = w[1];
      maxw = w[0];
    }
    sumw = w[0] + w[1];
    i = 3;
  }
  for (; i < nw; i += 2) {
    if (w[i - 1] < w[i]) {
      minw = std::min(minw, w[i - 1]);
      maxw = std::max(maxw, w[i]);
    } else {
      minw = std::min(minw, w[i]);
      maxw = std::max(maxw, w[i - 1]);
    }
    sumw += w[i - 1] + w[i];
  }
  if (mi != nullptr) {
    *mi = minw;
  }
  if (ma != nullptr) {
    *ma = maxw;
  }
  if (su != nullptr) {
    *su = static_cast<T2>(sumw);
  }
}
// Returns a zeroed bitset with capacity for n bits
// (number of 32-bit words needed, rounding up).
inline static std::vector<uint32_t> EmptyBitset(int n) {
  return std::vector<uint32_t>((n + 31) / 32);
}
// Sets bit `val` in the bitset, growing the word vector if needed.
// Fix: use an unsigned literal in the shift -- `1 << 31` is signed-integer
// overflow (undefined behavior) when the bit index is 31.
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
  auto& bits = *vec;
  const int word = val / 32;
  const int offset = val % 32;
  if (static_cast<int>(vec->size()) < word + 1) {
    vec->resize(word + 1, 0);
  }
  bits[word] |= (1U << offset);
}
// Builds a bitset with the bit for each of vals[0..n) set; the result is
// sized just large enough for the largest value seen.
// Fix: use an unsigned literal in the shift -- `1 << 31` is signed-integer
// overflow (undefined behavior) when a value's bit index is 31.
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
  std::vector<uint32_t> ret;
  for (int i = 0; i < n; ++i) {
    const int word = vals[i] / 32;
    const int offset = vals[i] % 32;
    if (static_cast<int>(ret.size()) < word + 1) {
      ret.resize(word + 1, 0);
    }
    ret[word] |= (1U << offset);
  }
  return ret;
}
// Tests bit `pos` in a bitset of n 32-bit words; positions beyond the
// stored words report false.
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
  const int word = pos / 32;
  if (word >= n) {
    return false;
  }
  const int offset = pos % 32;
  return ((bits[word] >> offset) & 1) != 0;
}
// True when b is <= a within one ulp (i.e. b does not exceed the next
// representable double above a).
inline static bool CheckDoubleEqualOrdered(double a, double b) {
  return b <= std::nextafter(a, INFINITY);
}
// Returns the next representable double strictly above a (one ulp up).
inline static double GetDoubleUpperBound(double a) {
  const double upper = std::nextafter(a, INFINITY);
  return upper;
}
// Returns the length of the current line, i.e. the number of characters
// before the first '\n', '\r' or end of string.
inline static size_t GetLine(const char* str) {
  size_t count = 0;
  while (str[count] != '\0' && str[count] != '\n' && str[count] != '\r') {
    ++count;
  }
  return count;
}
// Consumes at most one line ending: "\r\n", "\r", or "\n".
inline static const char* SkipNewLine(const char* str) {
  const char* q = str;
  if (*q == '\r') {
    ++q;
  }
  if (*q == '\n') {
    ++q;
  }
  return q;
}
// Signum: 1 for positive, -1 for negative, 0 for zero.
template <typename T>
static int Sign(T x) {
  if (x > T(0)) return 1;
  if (x < T(0)) return -1;
  return 0;
}
// Natural log that maps non-positive inputs to -infinity instead of
// raising a domain error / returning NaN.
template <typename T>
static T SafeLog(T x) {
  return x > 0 ? std::log(x) : -INFINITY;
}
// Returns false if s contains any character that would break JSON structure
// when embedded unescaped: " , : [ ] { }
inline bool CheckAllowedJSON(const std::string& s) {
  for (const char c : s) {
    switch (static_cast<unsigned char>(c)) {
      case '"':
      case ',':
      case ':':
      case '[':
      case ']':
      case '{':
      case '}':
        return false;
      default:
        break;
    }
  }
  return true;
}
// Rounds x to the nearest integer for non-negative x (half rounds up).
// NOTE(review): for negative x the cast truncates toward zero after adding
// 0.5, so e.g. -1.4 maps to 0 rather than -1 -- callers appear to pass
// non-negative values; confirm before relying on negative inputs.
inline int RoundInt(double x) {
  return static_cast<int>(x + 0.5f);
}
// STL-compatible stateless allocator returning N-byte-aligned storage via the
// _mm_malloc/_mm_free intrinsics (default alignment 32 bytes).
// Typical use: std::vector<T, AlignmentAllocator<T, N>>.
template <typename T, std::size_t N = 32>
class AlignmentAllocator {
 public:
  typedef T value_type;
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;
  typedef T* pointer;
  typedef const T* const_pointer;
  typedef T& reference;
  typedef const T& const_reference;
  inline AlignmentAllocator() throw() {}
  // Rebinding copy-constructor: allocators of any T2 are interchangeable
  // because the allocator holds no state.
  template <typename T2>
  inline AlignmentAllocator(const AlignmentAllocator<T2, N>&) throw() {}
  inline ~AlignmentAllocator() throw() {}
  // (sic: "adress" -- misspelling kept; it is part of the public interface)
  inline pointer adress(reference r) {
    return &r;
  }
  inline const_pointer adress(const_reference r) const {
    return &r;
  }
  // Allocates raw (uninitialized) storage for n objects, aligned to N bytes.
  inline pointer allocate(size_type n) {
    return (pointer)_mm_malloc(n * sizeof(value_type), N);
  }
  inline void deallocate(pointer p, size_type) {
    _mm_free(p);
  }
  // Copy-constructs an object in place at p.
  inline void construct(pointer p, const value_type& wert) {
    new (p) value_type(wert);
  }
  inline void destroy(pointer p) {
    p->~value_type();
  }
  inline size_type max_size() const throw() {
    return size_type(-1) / sizeof(value_type);
  }
  template <typename T2>
  struct rebind {
    typedef AlignmentAllocator<T2, N> other;
  };
  bool operator!=(const AlignmentAllocator<T, N>& other) const {
    return !(*this == other);
  }
  // Returns true if and only if storage allocated from *this
  // can be deallocated from other, and vice versa.
  // Always returns true for stateless allocators.
  bool operator==(const AlignmentAllocator<T, N>&) const {
    return true;
  }
};
// Accumulates named wall-clock timings, one map per OpenMP thread; the timing
// machinery is compiled only under TIMETAG (otherwise Start/Stop are no-ops).
// Per-thread results are merged and logged, sorted by name, on destruction.
class Timer {
 public:
  Timer() {
#ifdef TIMETAG
    int num_threads = OMP_NUM_THREADS();
    start_time_.resize(num_threads);
    stats_.resize(num_threads);
#endif  // TIMETAG
  }
  ~Timer() { Print(); }
#ifdef TIMETAG
  // Records the start timestamp of `name` for the calling thread.
  void Start(const std::string& name) {
    auto tid = omp_get_thread_num();
    start_time_[tid][name] = std::chrono::steady_clock::now();
  }
  // Adds the time elapsed since the matching Start(name) on this thread.
  void Stop(const std::string& name) {
    auto cur_time = std::chrono::steady_clock::now();
    auto tid = omp_get_thread_num();
    if (stats_[tid].find(name) == stats_[tid].end()) {
      stats_[tid][name] = std::chrono::duration<double, std::milli>(0);
    }
    stats_[tid][name] += cur_time - start_time_[tid][name];
  }
#else
  void Start(const std::string&) {}
  void Stop(const std::string&) {}
#endif  // TIMETAG
  // Merges per-thread stats and logs one line per timer name (sorted).
  void Print() const {
#ifdef TIMETAG
    std::unordered_map<std::string, std::chrono::duration<double, std::milli>>
        stats(stats_[0].begin(), stats_[0].end());
    for (size_t i = 1; i < stats_.size(); ++i) {
      for (auto it = stats_[i].begin(); it != stats_[i].end(); ++it) {
        if (stats.find(it->first) == stats.end()) {
          stats[it->first] = it->second;
        } else {
          stats[it->first] += it->second;
        }
      }
    }
    // std::map orders the output alphabetically by timer name
    std::map<std::string, std::chrono::duration<double, std::milli>> ordered(
        stats.begin(), stats.end());
    for (auto it = ordered.begin(); it != ordered.end(); ++it) {
      // NOTE(review): `it->second * 1e-3` is still a duration object passed
      // through varargs for "%f" -- confirm Log::Info handles this correctly.
      Log::Info("%s costs:\t %f", it->first.c_str(), it->second * 1e-3);
    }
#endif  // TIMETAG
  }
#ifdef TIMETAG
  // Per-thread map: timer name -> timestamp of the last Start().
  std::vector<
      std::unordered_map<std::string, std::chrono::steady_clock::time_point>>
      start_time_;
  // Per-thread map: timer name -> accumulated elapsed milliseconds.
  std::vector<std::unordered_map<std::string,
                                 std::chrono::duration<double, std::milli>>>
      stats_;
#endif  // TIMETAG
};
// Note: this class is not thread-safe, don't use it inside omp blocks
// RAII helper: times the enclosing scope under `name` using the given Timer.
class FunctionTimer {
 public:
#ifdef TIMETAG
  FunctionTimer(const std::string& name, Timer& timer) : timer_(timer) {
    timer.Start(name);
    name_ = name;
  }
  ~FunctionTimer() { timer_.Stop(name_); }

 private:
  std::string name_;
  Timer& timer_;
#else
  FunctionTimer(const std::string&, Timer&) {}
#endif  // TIMETAG
};
} // namespace Common
extern Common::Timer global_timer;
/*!
* Provides locale-independent alternatives to Common's methods.
* Essential to make models robust to locale settings.
*/
namespace CommonC {
// Locale-independent Join: forwards to Common::Join with force_C_locale = true.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  return LightGBM::Common::Join(strs, delimiter, true);
}
// Locale-independent ranged Join over strs[start, end).
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  return LightGBM::Common::Join(strs, start, end, delimiter, true);
}
// Alias of Common::Atof (the hand-rolled parser is already locale-independent).
inline static const char* Atof(const char* p, double* out) {
  return LightGBM::Common::Atof(p, out);
}
// Pointer-based conversion helper: integral case via Common::Atoi.
template<typename T, bool is_float>
struct __StringToTHelperFast {
  const char* operator()(const char*p, T* out) const {
    return LightGBM::Common::Atoi(p, out);
  }
};
/*!
 * \warning Beware that ``Common::Atof`` in ``__StringToTHelperFast``,
 * has **less** floating point precision than ``__StringToTHelper``.
 * Both versions are kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision.
 * Check ``StringToArrayFast`` and ``StringToArray`` for more details on this.
 */
template<typename T>
struct __StringToTHelperFast<T, true> {
  const char* operator()(const char*p, T* out) const {
    double tmp = 0.0f;
    auto ret = Atof(p, &tmp);
    *out = static_cast<T>(tmp);
    return ret;
  }
};
// String-based conversion helper: integral case via Common::Atoi.
template<typename T, bool is_float>
struct __StringToTHelper {
  T operator()(const std::string& str) const {
    T ret = 0;
    LightGBM::Common::Atoi(str.c_str(), &ret);
    return ret;
  }
};
/*!
 * \warning Beware that ``Common::Atof`` in ``__StringToTHelperFast``,
 * has **less** floating point precision than ``__StringToTHelper``.
 * Both versions are kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision.
 * Check ``StringToArrayFast`` and ``StringToArray`` for more details on this.
 * \note It is possible that ``fast_double_parser::parse_number`` is faster than ``Common::Atof``.
 */
template<typename T>
struct __StringToTHelper<T, true> {
  T operator()(const std::string& str) const {
    double tmp;
    // AtofPrecise returns its input pointer unchanged when nothing was parsed
    const char* end = Common::AtofPrecise(str.c_str(), &tmp);
    if (end == str.c_str()) {
      Log::Fatal("Failed to parse double: %s", str.c_str());
    }
    return static_cast<T>(tmp);
  }
};
/*!
* \warning Beware that due to internal use of ``Common::Atof`` in ``__StringToTHelperFast``,
* this method has less precision for floating point numbers than ``StringToArray``,
* which calls ``__StringToTHelper``.
* As such, ``StringToArrayFast`` and ``StringToArray`` are not equivalent!
* Both versions were kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision.
*/
// Parses exactly n whitespace-separated values directly from the character
// buffer, using the (faster, less precise for floats) helper above.
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  auto p_str = str.c_str();
  __StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
  std::vector<T> ret(n);
  for (int i = 0; i < n; ++i) {
    p_str = helper(p_str, &ret[i]);
  }
  return ret;
}
/*!
 * \warning Do not replace calls to this method by ``StringToArrayFast``.
 * This method is more precise for floating point numbers.
 * Check ``StringToArrayFast`` for more details.
 */
// Splits on spaces and parses exactly n tokens as T; aborts (CHECK_EQ) if the
// token count differs from n.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), ' ');
  CHECK_EQ(strs.size(), static_cast<size_t>(n));
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}
/*!
 * \warning Do not replace calls to this method by ``StringToArrayFast``.
 * This method is more precise for floating point numbers.
 * Check ``StringToArrayFast`` for more details.
 */
// Splits on `delimiter` and parses each (non-empty) token as T.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
  std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), delimiter);
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}
#if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))
/*!
* Safely formats a value onto a buffer according to a format string and null-terminates it.
*
* \note It checks that the full value was written or forcefully aborts.
* This safety check serves to prevent incorrect internal API usage.
* Correct usage will never incur in this problem:
* - The received buffer size shall be sufficient at all times for the input format string and value.
*/
// Formats `value` into `buffer` with fmt, aborting if the formatted text does
// not fit; always null-terminates on success.
template <typename T>
inline static void format_to_buf(char* buffer, const size_t buf_len, const char* format, const T value) {
  auto result = fmt::format_to_n(buffer, buf_len, format, value);
  if (result.size >= buf_len) {
    Log::Fatal("Numerical conversion failed. Buffer is too small.");
  }
  buffer[result.size] = '\0';
}
// Value-to-string helper: default formatting "{}" for non-float types.
template<typename T, bool is_float, bool high_precision>
struct __TToStringHelper {
  void operator()(T value, char* buffer, size_t buf_len) const {
    format_to_buf(buffer, buf_len, "{}", value);
  }
};
// Floating point, normal precision: "{:g}".
template<typename T>
struct __TToStringHelper<T, true, false> {
  void operator()(T value, char* buffer, size_t buf_len) const {
    format_to_buf(buffer, buf_len, "{:g}", value);
  }
};
// Floating point, high precision (17 significant digits): "{:.17g}".
template<typename T>
struct __TToStringHelper<T, true, true> {
  void operator()(T value, char* buffer, size_t buf_len) const {
    format_to_buf(buffer, buf_len, "{:.17g}", value);
  }
};
/*!
 * Converts an array to a string with with values separated by the space character.
 * This method replaces Common's ``ArrayToString`` and ``ArrayToStringFast`` functionality
 * and is locale-independent.
 *
 * \note If ``high_precision_output`` is set to true,
 * floating point values are output with more digits of precision.
 */
// At most min(n, arr.size()) leading elements are written.
template<bool high_precision_output = false, typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  __TToStringHelper<T, std::is_floating_point<T>::value, high_precision_output> helper;
  // 32 chars covers a 17-significant-digit double; 16 suffices otherwise
  const size_t buf_len = high_precision_output ? 32 : 16;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  Common::C_stringstream(str_buf);
  helper(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    helper(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}
#endif // (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))
} // namespace CommonC
} // namespace LightGBM
#endif // LIGHTGBM_UTILS_COMMON_H_
|
rumi6r.c | /*
* Date: 11 December 2015
* Contact: Thomas Peyrin - thomas.peyrin@gmail.com
*/
/*
* Simulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: hsn.hadipour@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
// #define DEBUG 1
#define Nthreads 4 // Number of parallel threads utilized in this program
#define NumOfExperiments 128 // Number of independent experiments
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
FILE *fic;
/* Seeds the C PRNG with a time-derived value; `offset` perturbs the seed so
 * that concurrent runs (one per thread/experiment) get distinct streams. */
void init_prng(int offset) {
    unsigned int derived_seed = 10 * time(NULL) + 11 * offset;
    /* srand() should only be called once per stream of rand() draws */
    srand(derived_seed);
    printf("[+] PRNG initialized to 0x%08X\n", derived_seed);
}
// Writes the 4x4 internal state to the global output file `fic` as hex.
// For 64-bit block versions each cell holds a nibble, so pairs of cells are
// packed into one byte (first cell in the high nibble) before printing;
// for 128-bit versions each cell is printed as one byte.
// Cell (row, col) of the matrix is addressed as state[idx >> 2][idx & 0x3].
void display_matrix(unsigned char state[4][4], int ver)
{
    int i;
    unsigned char input[16];
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
        for (i = 0; i < 8; i++)
            fprintf(fic, "%02x", input[i]);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
        for (i = 0; i < 16; i++)
            fprintf(fic, "%02x", input[i]);
    }
}
// Writes the cipher state followed by each tweakey array to `fic`.
// The number of tweakey arrays printed is key_size / block_size
// (versions[ver][1] / versions[ver][0]), labeled TK1..TKz.
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int k;
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        fprintf(fic, " - TK%i = ", k + 1);
        display_matrix(keyCells[k], ver);
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
// One round of the tweakey schedule: XOR the top two rows of every active
// tweakey array into the state, then permute every tweakey array with
// TWEAKEY_P and clock the per-array LFSRs (TK2 and TK3 only, on the rows
// that just moved into the extraction position).
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // apply the subtweakey to the internal state
    // Only rows 0 and 1 of the tweakey arrays are absorbed each round.
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the TWEAKEY permutation
                pos = TWEAKEY_P[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs
    // TK1 (k == 0) is never clocked; TK2 and TK3 use different LFSRs, with
    // 4-bit or 8-bit feedback depending on the cell size of the version.
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i <= 1; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of LFSRs for TK updates
                if (k == 1)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
                else if (k == 2)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
            }
        }
    }
    // Commit the permuted-and-clocked tweakey arrays back in place.
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function)
// Exact inverse of AddKey: first rewind the tweakey schedule one round
// (inverse permutation, then inverse LFSRs on the rows that were clocked —
// which are now rows 2 and 3 after the inverse permutation), and only then
// XOR the top two rows of every active tweakey array into the state.
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the inverse TWEAKEY permutation
                pos = TWEAKEY_P_inv[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs
    // Note: the inverse LFSR of the TK2 update is the TK3 update and vice
    // versa, and it is applied to rows 2-3 (the rows the inverse permutation
    // moved out of the extraction position).
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 2; i <= 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of inverse LFSRs for TK updates
                if (k == 1)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
                else if (k == 2)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
            }
        }
    }
    // Commit the rewound tweakey arrays back in place.
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
    // apply the subtweakey to the internal state
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
}
// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
state[0][0] ^= (RC[r] & 0xf);
state[1][0] ^= ((RC[r] >> 4) & 0x3);
state[2][0] ^= 0x2;
}
// apply the 4-bit Sbox
// Substitute every cell through the 4-bit S-box (64-bit block versions).
void SubCell4(unsigned char state[4][4])
{
    for (int row = 0; row < 4; row++)
        for (int col = 0; col < 4; col++)
            state[row][col] = sbox_4[state[row][col]];
}
// apply the 4-bit inverse Sbox
// Substitute every cell through the inverse 4-bit S-box.
void SubCell4_inv(unsigned char state[4][4])
{
    for (int row = 0; row < 4; row++)
        for (int col = 0; col < 4; col++)
            state[row][col] = sbox_4_inv[state[row][col]];
}
// apply the 8-bit Sbox
// Substitute every cell through the 8-bit S-box (128-bit block versions).
void SubCell8(unsigned char state[4][4])
{
    for (int row = 0; row < 4; row++)
        for (int col = 0; col < 4; col++)
            state[row][col] = sbox_8[state[row][col]];
}
// apply the 8-bit inverse Sbox
// Substitute every cell through the inverse 8-bit S-box.
void SubCell8_inv(unsigned char state[4][4])
{
    for (int row = 0; row < 4; row++)
        for (int col = 0; col < 4; col++)
            state[row][col] = sbox_8_inv[state[row][col]];
}
// Apply the ShiftRows function
// Apply the ShiftRows permutation: cell c of the new state is taken from
// cell P[c] of the old state (cells numbered 0..15 row-major).
void ShiftRows(unsigned char state[4][4])
{
    unsigned char shuffled[4][4];
    for (int cell = 0; cell < 16; cell++)
    {
        const int src = P[cell];
        shuffled[cell >> 2][cell & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, shuffled, sizeof(shuffled));
}
// Apply the inverse ShiftRows function
// Apply the inverse ShiftRows permutation: cell c of the new state is taken
// from cell P_inv[c] of the old state.
void ShiftRows_inv(unsigned char state[4][4])
{
    unsigned char shuffled[4][4];
    for (int cell = 0; cell < 16; cell++)
    {
        const int src = P_inv[cell];
        shuffled[cell >> 2][cell & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, shuffled, sizeof(shuffled));
}
// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
// Apply the linear diffusion matrix M to each column (over XOR):
// M =
// 1 0 1 1
// 1 0 0 0
// 0 1 1 0
// 1 0 1 0
// i.e. (a,b,c,d) -> (a^c^d, a, b^c, a^c), written directly instead of via
// the in-place XOR/rotate sequence.
void MixColumn(unsigned char state[4][4])
{
    for (int col = 0; col < 4; col++)
    {
        const unsigned char a = state[0][col];
        const unsigned char b = state[1][col];
        const unsigned char c = state[2][col];
        const unsigned char d = state[3][col];
        state[0][col] = (unsigned char)(a ^ c ^ d);
        state[1][col] = a;
        state[2][col] = (unsigned char)(b ^ c);
        state[3][col] = (unsigned char)(a ^ c);
    }
}
// Apply the inverse linear diffusion matrix
// Apply the inverse of the MixColumn diffusion matrix to each column:
// (a,b,c,d) -> (b, b^c^d, b^d, a^d), written directly instead of via the
// in-place rotate/XOR sequence.
void MixColumn_inv(unsigned char state[4][4])
{
    for (int col = 0; col < 4; col++)
    {
        const unsigned char a = state[0][col];
        const unsigned char b = state[1][col];
        const unsigned char c = state[2][col];
        const unsigned char d = state[3][col];
        state[0][col] = b;
        state[1][col] = (unsigned char)(b ^ c ^ d);
        state[2][col] = (unsigned char)(b ^ d);
        state[3][col] = (unsigned char)(a ^ d);
    }
}
// decryption function of Skinny
// decryption function of Skinny
// Decrypts the ciphertext in `input` in place under `userkey`, undoing `r`
// rounds of version `ver` (versions[ver] = {block bits, tweakey bits}).
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char dummy[4][4] = {{0}};
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // Unpack ciphertext and up to three tweakey arrays into 4x4 cell form.
    // 64-bit versions use nibble cells, two per byte (high nibble first);
    // 128-bit versions use byte cells.
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
    // Fast-forward the tweakey schedule to its state after round r by
    // running the key update r times on a throwaway state; decryption then
    // rewinds it one round at a time inside AddKey_inv.
    for (i = r - 1; i >= 0; i--)
    {
        AddKey(dummy, keyCells, ver);
    }
#ifdef DEBUG
    fprintf(fic, "DEC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Undo the rounds in reverse order; each step is the inverse of the
    // corresponding step in enc() (AddConstants is XOR, so self-inverse).
    for (i = r - 1; i >= 0; i--)
    {
        MixColumn_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        if (versions[ver][0] == 64)
            SubCell4_inv(state);
        else
            SubCell8_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    }
#ifdef DEBUG
    fprintf(fic, "DEC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Repack the recovered plaintext into the caller's buffer.
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// encryption function of Skinny
// encryption function of Skinny
// Encrypts the plaintext in `input` in place under `userkey` for `r` rounds
// of version `ver` (versions[ver] = {block bits, tweakey bits}).
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // Unpack plaintext and up to three tweakey arrays into 4x4 cell form.
    // 64-bit versions use nibble cells, two per byte (high nibble first);
    // 128-bit versions use byte cells.
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
#ifdef DEBUG
    fprintf(fic, "ENC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Round function: SubCells, AddConstants, AddRoundTweakey (which also
    // advances the tweakey schedule), ShiftRows, MixColumns.
    for (i = 0; i < r; i++)
    {
        if (versions[ver][0] == 64)
            SubCell4(state);
        else
            SubCell8(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        MixColumn(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    } //The last subtweakey should not be added
#ifdef DEBUG
    fprintf(fic, "ENC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Repack the ciphertext into the caller's buffer.
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// generate test vectors for all the versions of Skinny
// generate test vectors for all the versions of Skinny
// Produces 9 random (TK, P, C, P') quadruples for version `ver` using a fixed
// round count of 10, writing them to the global file `fic` (the raw plaintext
// is also echoed to stdout). P' is the result of decrypting C again, so
// P' == P serves as a round-trip sanity check.
// NOTE(review): 10 rounds is NOT the full-round count of any SKINNY version —
// these vectors are for the reduced-round experiments in this file.
void TestVectors(int ver)
{
    unsigned char p[16];
    unsigned char c[16];
    unsigned char k[48];
    int n;
    for (n = 1; n < 10; n++)
    {
        int i;
        // Random plaintext; c starts as a copy and is encrypted in place.
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c[i] = p[i] = rand() & 0xff;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            printf("%02x", p[i]);
        printf("\n");
        // Random tweakey of the full tweakey size for this version.
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            k[i] = rand() & 0xff;
        fprintf(fic, "TK = ");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            fprintf(fic, "%02x", k[i]);
        fprintf(fic, "\n");
        fprintf(fic, "P = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", p[i]);
        fprintf(fic, "\n");
        enc(c, k, ver, 10);
        fprintf(fic, "C = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n");
        dec(c, k, ver, 10);
        fprintf(fic, "P' = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n\n");
    }
}
// Run N3 boomerang quartets against r-round Skinny (version `ver`) and count
// how many return: encrypt a pair (p1, p2 = p1 ^ dp) under related tweakeys
// (k1, k2 = k1 ^ dk1), shift both ciphertexts by dc, decrypt under the
// dk2-related tweakeys, and check whether the returning pair again differs
// by dp. Returns the number of successful quartets.
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    int i;
    unsigned char p1[16], p2[16];
    unsigned char c3[16], c4[16];
    unsigned char k1[48], k2[48], k3[48], k4[48];
    // randomly choose k1
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k1[i] = rand() & 0xff;
    // derive k2
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k2[i] = k1[i] ^ dk1[i];
    // derive k3
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k3[i] = k1[i] ^ dk2[i];
    // derive k4
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k4[i] = k2[i] ^ dk2[i];
    int num = 0;
    for (int t = 0; t < N3; t++)
    {
        // randomly choose p1
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p1[i] = rand() & 0xff;
        // derive p2
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p2[i] = p1[i] ^ dp[i];
        // enc/dec work in place: after enc, p1/p2 hold the ciphertexts.
        enc(p1, k1, ver, r);
        enc(p2, k2, ver, r);
        // derive c3
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c3[i] = p1[i] ^ dc[i];
        // derive c4
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c4[i] = p2[i] ^ dc[i];
        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);
        // The quartet "returns" if the decrypted pair differs by dp again.
        bool flag = 1;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            if ((c3[i] ^ c4[i]) != dp[i])
                flag = 0;
        if (flag)
        {
            num++;
        }
    }
    return num;
}
// Launch N1 parallel workers, each running N2 bunches of N3 boomerang
// quartets against R-round Skinny, and report timing plus the empirical
// probability 2^(-log2((N1*N2*N3)/hits)). Returns the total hit count.
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution
    int NUM[N1];
    int counter;
    printf("#Rounds: %d rounds\n", R);
    // Promote to double BEFORE multiplying so large query counts cannot
    // overflow int arithmetic.
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log((double)N1 * N2 * N3) / log(2));
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
    #pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        // Seed the PRNG per loop iteration so each worker draws an
        // independent stream even if the runtime grants fewer than N1
        // threads (rand()/srand() share state per process anyway).
        init_prng(counter);
        for (int j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
        }
        // Index by the loop counter, not omp_get_thread_num(): if fewer than
        // N1 threads run, thread ids repeat and some NUM entries would be
        // left uninitialized (and others overwritten) before the sum below.
        NUM[counter] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    double sum_temp = 1;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    // Again promote before multiplying to avoid int overflow.
    sum_temp = (double)N1 * N2 * N3 / sum;
    printf("2^(-%f)\n\n", log(sum_temp) / log(2));
    printf("##########################\n");
    return sum;
}
// Parse a hex string into a block-sized byte array for version `ver`
// (versions[ver][0] bits, i.e. versions[ver][0]>>3 bytes; hex_str must hold
// at least twice that many hex digits).
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        // strtol requires a NUL-terminated string; the previous 2-byte
        // buffer was unterminated, so strtol read past the end (UB).
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Parse a hex string into a tweakey-sized byte array for version `ver`
// (versions[ver][1] bits, i.e. versions[ver][1]>>3 bytes; hex_str must hold
// at least twice that many hex digits).
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        // strtol requires a NUL-terminated string; the previous 2-byte
        // buffer was unterminated, so strtol read past the end (UB).
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Driver: configure a boomerang distinguisher (round count, Skinny version,
// plaintext/ciphertext/tweakey differences), run NumOfExperiments independent
// experiments of N1*N2*N3 quartets each, and print per-experiment and
// averaged empirical probabilities.
int main()
{
    // srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand();
    // init_prng(1);
    // //test all versions of Skinny
    // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
    // {
    //     sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
    //     fic = fopen(name, "w");
    //     fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
    //     TestVectors(i);
    //     fclose(fic);
    //     printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
    // }
    unsigned char dp[16];
    unsigned char dc[16];
    unsigned char dk1[48];
    unsigned char dk2[48];
    // #######################################################################################################
    // #######################################################################################################
    // ############################## User must change only the following lines ##############################
    int R = 6; // Number of rounds
    int ver = 5; // Determine the version:
    // [0 = Skinny-64-64]
    // [1 = Skinny-64-128]
    // [2 = Skinny-64-192]
    // [3 = Skinny-128-128]
    // [4 = Skinny-128-256]
    // [5 = Skinny-128-384]
    // Differences as hex strings: plaintext, ciphertext, and the two tweakey
    // differences defining the related-key quartet.
    char dp_str[] = "00000000000000000000004000000000";
    char dc_str[] = "00000000000000000000000000000000";
    char dk1_str[] = "00000000000000000000000000002a00000000000000000000000000000099000000000000000000000000000000f300";
    char dk2_str[] = "000000000000000000000054000000000000000000000000000000f30000000000000000000000000000007f00000000";
    // #######################################################################################################
    // #######################################################################################################
    convert_hexstr_to_statearray(ver, dp_str, dp);
    convert_hexstr_to_statearray(ver, dc_str, dc);
    convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
    convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
    //########################## Number of queries #########################
    int N1 = Nthreads; // Number of parallel threads : N1
    int deg1 = 10;
    int deg2 = 12;
    int N2 = 1 << deg1; // Number of bunches per thread : N2 = 2^(deg)
    int N3 = 1 << deg2; // Number of queries per bunche : N3
    //################### Number of total queries : N1*N2*N3 ###############
    // One formatted "2^(-x.xx), " entry per experiment for the summary line.
    char all_results[NumOfExperiments][20];
    double sum = 0;
    double sum_temp = 0;
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("Experiment Number %d:\n", i);
        sum_temp = send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
        sum += sum_temp;
        sum_temp = (double)(N1 * N2 * N3) / sum_temp;
        sprintf(all_results[i], "2^(-%0.2f), ", log(sum_temp) / log(2));
    }
    printf("A summary of all results:\n");
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("%s", all_results[i]);
    }
    // Average over all experiments, computed in log space.
    printf("\n##########################\nAverage = 2^(-%0.4f)\n",
           (log(NumOfExperiments) + log(N1) + log(N2) + log(N3) - log(sum))/log(2));
    return 0;
}
|
NodeMapping.h |
/*****************************************************************************
*
* Copyright (c) 2003-2020 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014-2017 by Centre for Geoscience Computing (GeoComp)
* Development from 2019 by School of Earth and Environmental Sciences
**
*****************************************************************************/
#ifndef __DUDLEY_NODEMAPPING_H__
#define __DUDLEY_NODEMAPPING_H__
#include "Util.h"
namespace dudley {
/// NodeMapping provides a mapping from the local nodes typically to the
/// degrees of freedom, the reduced degrees of freedom or the reduced node set
/// NodeMapping provides a mapping from the local nodes typically to the
/// degrees of freedom, the reduced degrees of freedom or the reduced node set.
/// Owns two raw arrays (`target` and `map`); call clear() (or assign(), which
/// clears first) to release them.
/// NOTE(review): there is no destructor and copying is not disabled, so the
/// owner is responsible for calling clear() and avoiding double-deletes.
struct NodeMapping
{
    NodeMapping() : numNodes(0), target(NULL), numTargets(0), map(NULL) {}

    /// resets both map and target
    void clear()
    {
        delete[] map;
        delete[] target;
        target = NULL;
        map = NULL;
        numNodes = 0;
        numTargets = 0;
    }

    /// initializes a node mapping. The target array is copied and a reverse
    /// map created.
    /// theTarget[i]=unused means that no target is defined for FEM node i.
    /// Throws if a target is negative or the targets do not form a continuous
    /// labeling 0..numTargets-1.
    void assign(const index_t* theTarget, dim_t nNodes, index_t unused)
    {
        clear();
        if (nNodes == 0)
            return;

        numNodes = nNodes;

        std::pair<index_t,index_t> range(
                util::getFlaggedMinMaxInt(numNodes, theTarget, unused));
        if (range.first < 0) {
            throw escript::ValueError("NodeMapping: target has negative entry.");
        }
        numTargets = range.first<=range.second ? range.second+1 : 0;

        target = new index_t[numNodes];
        map = new index_t[numTargets];

        bool err = false;
#pragma omp parallel
        {
            // Initialize the reverse map to the "unset" marker first.
            // Previously `map` was left uninitialized, so the sanity check
            // below compared indeterminate values against -1 (undefined
            // behavior) and could not reliably detect gaps in the labeling.
            // The implicit barrier after this worksharing loop orders it
            // before the fill loop.
#pragma omp for
            for (index_t i=0; i<numTargets; ++i) {
                map[i] = -1;
            }
#pragma omp for
            for (index_t i=0; i<numNodes; ++i) {
                target[i] = theTarget[i];
                if (target[i] != unused)
                    map[target[i]] = i;
            }
            // sanity check: every target index must have been hit
#pragma omp for
            for (index_t i=0; i<numTargets; ++i) {
                if (map[i] == -1) {
#pragma omp critical
                    err = true;
                }
            }
        }
        if (err)
            throw escript::ValueError("NodeMapping: target does not define a continuous labeling.");
    }

    /// returns the number of target nodes (number of items in the map array)
    inline dim_t getNumTargets() const { return numTargets; }

    /// size of `target` (number of FEM nodes)
    dim_t numNodes;

    /// target[i] defines the target of FEM node i=0,...,numNodes
    index_t* target;

    /// size of `map` (number of target nodes, e.g. DOF, reduced DOF, etc.)
    dim_t numTargets;

    /// maps the target nodes back to the FEM nodes: target[map[i]]=i
    index_t* map;
};
} // namespace dudley
#endif // __DUDLEY_NODEMAPPING_H__
|
wallsinkprop.h | #ifndef _WALLSINK_PROP_H
#define _WALLSINK_PROP_H
CPS_START_NAMESPACE
// Primary template is intentionally empty: only the explicit specializations
// below (SpinColorFlavorMatrix, WilsonMatrix) provide the site-matrix API.
template<typename MatrixType>
class siteMatrixCompute{};
// Site-matrix access for flavored propagators: extracts the 2x2-flavored
// spin-color matrix at a site, applies gauge-fixing matrices per flavor, and
// sums over the lattice.
template<>
class siteMatrixCompute<SpinColorFlavorMatrix>{
  PropWrapper prop;   // propagator pair wrapped with flavor structure
  bool setup;         // true once setProp() has been called
public:
  siteMatrixCompute():setup(false){}

  bool isSetup() const{ return setup; }

  // Store the propagator to read site matrices from.
  void setProp(const PropWrapper &_prop){
    prop = _prop; setup = true;
  }

  // Fetch the flavored site matrix at local site index `site`.
  void siteMatrix(SpinColorFlavorMatrix &into, const int site){
    prop.siteMatrix(into,site);
  }

  // Left-multiply each flavor row by the gauge-fixing matrix of that flavor
  // at this site. Fails hard if the lattice has no gauge-fixing matrices.
  void multGFmat(SpinColorFlavorMatrix &what, const int site, Lattice &lat){
    Matrix const* gfmat_f0 = lat.FixGaugeMatrix(site,0);
    Matrix const* gfmat_f1 = lat.FixGaugeMatrix(site,1);
    if(gfmat_f0 == NULL || gfmat_f1 == NULL) ERR.General("siteMatrixCompute<SpinColorFlavorMatrix>","multGFmat","No gauge fixing matrix for site %d",site);

    what(0,0).LeftTimesEqual(*gfmat_f0);
    what(0,1).LeftTimesEqual(*gfmat_f0);
    what(1,0).LeftTimesEqual(*gfmat_f1);
    what(1,1).LeftTimesEqual(*gfmat_f1);
  }

  // Global sum of each flavor component over all nodes.
  void latticeSum(SpinColorFlavorMatrix &what){
    for(int i=0;i<2;i++)
      for(int j=0;j<2;j++){
	WilsonMatrix &wm = what(i,j);
	Float* w = (Float*)wm.ptr();
	static const int size = 2*12*12; // complex 12x12 = 288 Floats
	slice_sum(w, size, 99); //99 is a *magic* number (we are abusing slice_sum here)
      }
  }
};
// Site-matrix access for unflavored (Wilson) propagators. Holds a non-owning
// pointer to the QPropW; the propagator must outlive this object.
template<>
class siteMatrixCompute<WilsonMatrix>{
  const QPropW *prop; // non-owning
  bool setup;         // true once a setProp() overload has been called
public:
  siteMatrixCompute():setup(false){}

  bool isSetup() const{ return setup; }

  void setProp(const QPropW &_prop){
    prop = &_prop; setup = true;
  }
  // Convenience overload: take the first propagator from a PropWrapper.
  void setProp(const PropWrapper &_prop){
    prop = _prop.getPtr(0); setup = true;
  }

  // Fetch the Wilson site matrix at local site index `site`.
  void siteMatrix(WilsonMatrix &into, const int site){
    into = prop->SiteMatrix(site);
  }

  // Left-multiply by the site's gauge-fixing matrix; fails hard if absent.
  void multGFmat(WilsonMatrix &what, const int site, Lattice &lat){
    Matrix const* gfmat = lat.FixGaugeMatrix(site);
    if(gfmat == NULL) ERR.General("siteMatrixCompute<WilsonMatrix>","multGFmat","No gauge fixing matrix for site %d",site);
    what.LeftTimesEqual(*gfmat);
  }

  // Global sum over all nodes.
  void latticeSum(WilsonMatrix &what){
    Float* w = (Float*)what.ptr();
    static const int size = 2*12*12; // complex 12x12 = 288 Floats
    slice_sum(w, size, 99); //99 is a *magic* number (we are abusing slice_sum here)
  }
};
// Wall-sink projection of a propagator: for every global timeslice t, sums
// the (optionally gauge-fixed, optionally momentum-phased) site matrices over
// the spatial volume. Results are indexed by global time via operator().
template<typename MatrixType>
class WallSinkProp: public siteMatrixCompute<MatrixType>{
  std::vector<MatrixType> result; // one summed matrix per global timeslice

  // Convert a local (lexicographic) site index into global 4d coordinates.
  void global_coord(const int site, int into_vec[4]){
    int rem = site;
    for(int i=0;i<4;i++){
      into_vec[i] = rem % GJP.NodeSites(i) + GJP.NodeCoor(i)*GJP.NodeSites(i);
      rem /= GJP.NodeSites(i);
    }
  }
  bool gauge_fix_sink;
public:
  WallSinkProp(const bool _gauge_fix_sink = true): gauge_fix_sink(_gauge_fix_sink){ }

  //Can provide an optional sink momentum in lattice units, for which the phase exp(-ip.x) is then applied
  void compute(Lattice &lat, const double *p = NULL){
    if(!this->isSetup()) ERR.General("WallSinkProp","compute","Class has not been set up\n");

    const int global_T = GJP.TnodeSites()*GJP.Tnodes();
    const int local_T = GJP.TnodeSites();
    const int local_toff = GJP.TnodeCoor()*local_T;

    // Per-(timeslice, thread) accumulators so the parallel site loop needs
    // no synchronization; reduced over threads afterwards.
    const int nthread = omp_get_max_threads();
    std::vector<std::vector<MatrixType> > thread_mats(global_T); //[t][thread]
    for(int t=0;t<global_T;t++){
      thread_mats[t].resize(nthread);
      for(int thr=0;thr<nthread;thr++)
	thread_mats[t][thr] = 0.0;
    }

#pragma omp parallel for
    for(int x=0;x<GJP.VolNodeSites();x++){
      int x_glb[4]; global_coord(x,x_glb);

      MatrixType mat; this->siteMatrix(mat, x);

      //Gauge fix sink
      if(gauge_fix_sink){
	if(!lat.FixGaugeKind()) ERR.General("WallSinkProp","compute","Lattice is not gauge fixed!\n");
	this->multGFmat(mat,x,lat);
      }
      // Apply the sink momentum phase exp(-i p.x) over the spatial coords.
      if(p!=NULL){
	Float pdotx = 0.0;
	for(int i=0;i<3;i++) pdotx += p[i]*x_glb[i];
	mat *= Complex(cos(pdotx),-sin(pdotx));
      }
      thread_mats[x_glb[3]][omp_get_thread_num()] += mat;
    }

    //Thread sum
    result.resize(global_T);
#pragma omp parallel for
    for(int t=0;t<global_T;t++){
      result[t] = thread_mats[t][0];
      for(int thr=1;thr<nthread;thr++){
	result[t] += thread_mats[t][thr];
      }
    }
    thread_mats.clear();

    //Lattice sum
    // Each node only contributed its local sites; sum over nodes so every
    // node ends up with the full wall sum for every global timeslice.
    for(int t=0;t<global_T;t++)
      this->latticeSum(result[t]);
  }

  // Overload taking a ThreeMomentum; converts to lattice units first.
  // NOTE(review): pp is Float[3] passed where `const double*` is expected —
  // assumes Float is a typedef for double; confirm against the CPS headers.
  void compute(Lattice &lat, const ThreeMomentum &p){
    Float pp[3]; p.latticeUnits(pp);
    compute(lat,pp);
  }

  // Access the wall-sink matrix at global time t_glb (valid after compute()).
  const MatrixType & operator()(const int t_glb) const{
    return result[t_glb];
  }
};
CPS_END_NAMESPACE
#endif
|
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
%      const double x_shear,const double y_shear,
% const double width,const double height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
  const double x_shear,const double y_shear,
  const double width,const double height,
  const MagickBooleanType rotate,ExceptionInfo *exception)
{
  Image
    *crop_image;

  PointInfo
    corner[4],
    lo,
    hi;

  RectangleInfo
    geometry,
    page;

  register ssize_t
    i;

  /*
    Push the four corners of the centered width x height box through the
    shear transform (plus the extra X shear used for 90-degree rotates) and
    translate into the sheared image's coordinate frame. Note each corner's
    y shear uses its already-sheared x.
  */
  for (i=0; i < 4; i++)
  {
    corner[i].x=((i & 1) != 0 ? width : -width)/2.0;
    corner[i].y=((i & 2) != 0 ? height : -height)/2.0;
  }
  for (i=0; i < 4; i++)
  {
    corner[i].x+=x_shear*corner[i].y;
    corner[i].y+=y_shear*corner[i].x;
    if (rotate != MagickFalse)
      corner[i].x+=x_shear*corner[i].y;
    corner[i].x+=(double) (*image)->columns/2.0;
    corner[i].y+=(double) (*image)->rows/2.0;
  }
  /*
    Bounding box of the transformed corners.
  */
  lo=corner[0];
  hi=corner[0];
  for (i=1; i < 4; i++)
  {
    if (corner[i].x < lo.x)
      lo.x=corner[i].x;
    if (corner[i].y < lo.y)
      lo.y=corner[i].y;
    if (corner[i].x > hi.x)
      hi.x=corner[i].x;
    if (corner[i].y > hi.y)
      hi.y=corner[i].y;
  }
  geometry.x=(ssize_t) ceil(lo.x-0.5);
  geometry.y=(ssize_t) ceil(lo.y-0.5);
  geometry.width=(size_t) floor(hi.x-lo.x+0.5);
  geometry.height=(size_t) floor(hi.y-lo.y+0.5);
  /*
    Crop with a reset page so offsets do not shift the region, then restore
    the original page geometry on the cropped result.
  */
  page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  crop_image->page=page;
  *image=DestroyImage(*image);
  *image=crop_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
%  The result will be auto-cropped if the artifact "deskew:auto-crop" is
% defined, while the amount the image is to be deskewed, in degrees is also
% saved as the artifact "deskew:angle".
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Compute one half of the discrete Radon transform via the Brady/Götz-Druckmüller
  fast scheme: log2(width) butterfly passes combine column pairs at doubling
  step sizes (ping-ponging between the two matrix buffers), then each final
  column's sum of squared differences between vertically adjacent elements is
  written into `projection`. `sign` (+1/-1) selects which half of the
  projection array is filled.
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
  MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
  MatrixInfo
    *swap;

  register MatrixInfo
    *p,
    *q;

  register ssize_t
    x;

  size_t
    step;

  p=source_matrixs;
  q=destination_matrixs;
  for (step=1; step < GetMatrixColumns(p); step*=2)
  {
    for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
    {
      register ssize_t
        i;

      ssize_t
        y;

      unsigned short
        element,
        neighbor;

      for (i=0; i < (ssize_t) step; i++)
      {
        /* Rows where both the i and i+1 shifted partners exist. */
        for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
            continue;
        }
        /* Rows where only the i-shifted partner exists. */
        for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
        /* Remaining rows: no partner in range, copy the element through. */
        for ( ; y < (ssize_t) GetMatrixRows(p); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
      }
    }
    /* Ping-pong the buffers for the next pass. */
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
  for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
  {
    register ssize_t
      y;

    size_t
      sum;

    /* Sharpness measure of this projection column: sum of squared
       differences of vertically adjacent accumulated values. */
    sum=0;
    for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
    {
      ssize_t
        delta;

      unsigned short
        element,
        neighbor;

      if (GetMatrixElement(p,x,y,&element) == MagickFalse)
        continue;
      if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
        continue;
      delta=(ssize_t) element-(ssize_t) neighbor;
      sum+=delta*delta;
    }
    projection[GetMatrixColumns(p)+sign*x-1]=sum;
  }
}
/*
  RadonTransform() fills 'projection' (2*width-1 entries, where width is the
  smallest power of two covering (columns+7)/8) with Radon-style projections
  of the thresholded image in both shear directions.  Each image row is
  packed into bytes (a pixel contributes a 1-bit when any of its RGB channels
  falls below 'threshold'), and each byte's population count is stored as one
  matrix cell; RadonProjection() is then run once per direction.  Returns
  MagickFalse on allocation or pixel-cache failure.
*/
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,size_t *projection,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MatrixInfo
    *destination_matrixs,
    *source_matrixs;

  MagickBooleanType
    status;

  size_t
    count,
    width;

  ssize_t
    j,
    y;

  unsigned char
    c;

  unsigned short
    bits[256];

  /*
    Width is rounded up to a power of two so RadonProjection()'s butterfly
    (which doubles its step each pass) covers the whole matrix.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  destination_matrixs=AcquireMatrixInfo(width,image->rows,
    sizeof(unsigned short),exception);
  if ((source_matrixs == (MatrixInfo *) NULL) ||
      (destination_matrixs == (MatrixInfo *) NULL))
    {
      if (destination_matrixs != (MatrixInfo *) NULL)
        destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      if (source_matrixs != (MatrixInfo *) NULL)
        source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  if (NullMatrix(source_matrixs) == MagickFalse)
    {
      destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  /*
    Precompute the population count (number of set bits) of every byte value.
  */
  for (j=0; j < 256; j++)
  {
    c=(unsigned char) j;
    for (count=0; c != 0; c>>=1)
      count+=c & 0x01;
    bits[j]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      First direction: bit-count cells are stored right-to-left (--i),
      mirroring the row for the sign=-1 projection.
    */
    bit=0;
    byte=0;
    i=(ssize_t) (image->columns+7)/8;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,--i,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,--i,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
  (void) NullMatrix(source_matrixs);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Second direction: cells are stored left-to-right (i++) for the sign=+1
      projection.
    */
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,i++,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,i++,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_matrixs=DestroyMatrixInfo(destination_matrixs);
  source_matrixs=DestroyMatrixInfo(source_matrixs);
  /*
    Fix: previously this returned MagickTrue unconditionally, discarding
    pixel-cache failures recorded in 'status'; propagate them to the caller
    (DeskewImage already handles a MagickFalse return).
  */
  return(status);
}
/*
  GetImageBackgroundColor() estimates the image background by averaging the
  pixels of the border frame that is 'offset' pixels deep (the left/right
  'offset' columns of the top and bottom 'offset' rows) and stores the result
  as image->background_color.  A non-positive offset disables sampling and
  leaves the background color unchanged.
*/
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  PixelInfo
    background;

  double
    count;

  ssize_t
    y;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetPixelInfo(image,&background);
  count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Skip interior rows; only the top and bottom border rows are sampled.
    */
    if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
        {
          /*
            Fix: the pixel pointer must advance past skipped interior
            columns, otherwise the right-border samples read pixels from the
            wrong positions in the row.
          */
          p+=GetPixelChannels(image);
          continue;
        }
      background.red+=QuantumScale*GetPixelRed(image,p);
      background.green+=QuantumScale*GetPixelGreen(image,p);
      background.blue+=QuantumScale*GetPixelBlue(image,p);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        background.alpha+=QuantumScale*GetPixelAlpha(image,p);
      count++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Fix: guard against division by zero when no border pixels could be
    sampled (e.g. every pixel-cache fetch failed); keep the previous
    background color in that case.
  */
  if (count == 0.0)
    return;
  image->background_color.red=(double) ClampToQuantum(QuantumRange*
    background.red/count);
  image->background_color.green=(double) ClampToQuantum(QuantumRange*
    background.green/count);
  image->background_color.blue=(double) ClampToQuantum(QuantumRange*
    background.blue/count);
  if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
    image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
      background.alpha/count);
}
MagickExport Image *DeskewImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
  AffineMatrix
    affine_matrix;

  const char
    *artifact;

  double
    degrees;

  Image
    *clone_image,
    *crop_image,
    *deskew_image,
    *median_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  register ssize_t
    i;

  size_t
    max_projection,
    *projection,
    width;

  ssize_t
    skew;

  /*
    Compute deskew angle.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
    sizeof(*projection));
  if (projection == (size_t *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  status=RadonTransform(image,threshold,projection,exception);
  if (status == MagickFalse)
    {
      projection=(size_t *) RelinquishMagickMemory(projection);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    The strongest projection response identifies the skew: index i maps to a
    shear of i-width+1 byte-cells, negative for one direction, positive for
    the other.
  */
  max_projection=0;
  skew=0;
  for (i=0; i < (ssize_t) (2*width-1); i++)
  {
    if (projection[i] > max_projection)
      {
        skew=i-(ssize_t) width+1;
        max_projection=projection[i];
      }
  }
  projection=(size_t *) RelinquishMagickMemory(projection);
  /*
    Each projection cell spans 8 pixels, hence the /8 when converting the
    shear slope to an angle.
  */
  degrees=RadiansToDegrees(-atan((double) skew/width/8));
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " Deskew angle: %g",degrees);
  /*
    Deskew image.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  {
    char
      angle[MagickPathExtent];

    /*
      Record the detected angle on the result as the "deskew:angle" artifact.
    */
    (void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
    (void) SetImageArtifact(clone_image,"deskew:angle",angle);
  }
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
    exception);
  /*
    Build a pure-rotation affine matrix for the computed angle.
  */
  affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
  affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.tx=0.0;
  affine_matrix.ty=0.0;
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (IsStringTrue(artifact) == MagickFalse)
    {
      deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
      clone_image=DestroyImage(clone_image);
      return(deskew_image);
    }
  /*
    Auto-crop image.
  */
  GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
    exception);
  deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
  clone_image=DestroyImage(clone_image);
  if (deskew_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A 3x3 median filter suppresses speckle before the bounding box is
    measured, so stray pixels do not inflate the crop geometry.
  */
  median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
  if (median_image == (Image *) NULL)
    {
      deskew_image=DestroyImage(deskew_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
      "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
      geometry.height,(double) geometry.x,(double) geometry.y);
  crop_image=CropImage(deskew_image,&geometry,exception);
  deskew_image=DestroyImage(deskew_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
  ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"

  CacheView
    *image_view,
    *rotate_view;

  Image
    *rotate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  /*
    Initialize rotated image attributes.
  */
  assert(image != (Image *) NULL);
  page=image->page;
  rotations%=4;
  switch (rotations)
  {
    case 0:
    {
      rotate_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
    case 2:
    {
      rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        exception);
      break;
    }
    case 1:
    case 3:
    {
      /* a 90/270 degree rotation swaps the image dimensions */
      rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
        exception);
      break;
    }
  }
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  if (rotations == 0)
    return(rotate_image);
  /*
    Integral rotate the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
  switch (rotations)
  {
    case 1:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 90 degrees.  The image is processed in tiles for cache
        locality; each source tile column becomes a destination row.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /*
            Clip the tile to the image boundary.
          */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            /*
              Source tile column y is written as one destination row; the
              source is walked bottom-to-top (tile_pixels decrements).
            */
            q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
              (rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
              exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels-=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            /*
              NOTE(review): 'progress' is updated without synchronization
              inside the parallel loop -- progress reporting may be
              approximate; confirm this is intentional.
            */
            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /*
        A 90-degree rotation swaps the page geometry axes and reflects the
        horizontal offset.
      */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      break;
    }
    case 2:
    {
      register ssize_t
        y;

      /*
        Rotate 180 degrees: row y maps to row rows-y-1, with pixels written
        right-to-left.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
          1),image->columns,1,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        /* start q one past the row end; it is decremented before each write */
        q+=GetPixelChannels(rotate_image)*image->columns;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          q-=GetPixelChannels(rotate_image);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (rotate_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(rotate_image,channel,p[i],q);
          }
          p+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            /*
              NOTE(review): unsynchronized 'progress' update inside the
              parallel loop; values may be approximate.
            */
            proceed=SetImageProgress(image,RotateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /*
        Reflect the page offsets through the page geometry.
      */
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    case 3:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 270 degrees.  Tiled like the 90-degree case; each source tile
        column is written as a destination row, walking the source
        top-to-bottom (tile_pixels increments).
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /*
            Clip the tile to the image boundary.
          */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
              rotate_image->rows-(tile_x+width)),height,1,exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels+=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
            /*
              NOTE(review): unlike the 90-degree case, the sync here is
              wrapped in an omp critical section -- confirm whether the
              asymmetry is intentional.
            */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            /*
              NOTE(review): unsynchronized 'progress' update inside the
              parallel loop; values may be approximate.
            */
            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /*
        A 270-degree rotation swaps the page geometry axes and reflects the
        vertical offset.
      */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    default:
      break;
  }
  rotate_view=DestroyCacheView(rotate_view);
  image_view=DestroyCacheView(image_view);
  rotate_image->type=image->type;
  rotate_image->page=page;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    X shear image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  background=image->background_color;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelInfo
      pixel,
      source,
      destination;

    double
      area,
      displacement;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=x_offset*GetPixelChannels(image);
    /*
      Displacement grows linearly with distance from the vertical center of
      the region (Paeth shear); rows above and below mid-height shift in
      opposite directions.
    */
    displacement=degrees*(double) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /*
      'step' is the integer pixel shift; 'area' is the fractional remainder
      used to blend adjacent pixels for anti-aliasing.
    */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.  'pixel' always holds the previous
          source pixel so each output is an area-weighted blend of two
          adjacent inputs.
        */
        if (step > x_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /*
          Blend the trailing edge into the background, then fill the exposed
          pixels with the background color.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left, skipping destinations that would
          fall outside the image.
        */
        p+=width*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /*
          Blend the leading edge into the background and fill the exposed
          pixels with the background color.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,XShearImageTag,progress,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"

  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    x;

  /*
    Y Shear image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  background=image->background_color;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,width,1)
#endif
  for (x=0; x < (ssize_t) width; x++)
  {
    double
      area,
      displacement;

    PixelInfo
      pixel,
      source,
      destination;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    /*
      Each iteration shears one column; p steps one row per pixel-channel
      stride since the fetched region is one pixel wide.
    */
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=y_offset*GetPixelChannels(image);
    /*
      Displacement grows linearly with distance from the horizontal center of
      the region (Paeth shear).
    */
    displacement=degrees*(double) (x-width/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    /*
      'step' is the integer pixel shift; 'area' is the fractional remainder
      used to blend adjacent pixels for anti-aliasing.
    */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom; 'pixel' latches the previous source
          pixel so each output is an area-weighted blend of two inputs.
        */
        if (step > y_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          if ((y_offset+i) < step)
            {
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /*
          Blend the trailing edge into the background and fill the exposed
          pixels with the background color.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top, skipping destinations that would
          fall outside the image.
        */
        p+=height*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (y_offset+height+step-i) > image->rows)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /*
          Blend the leading edge into the background and fill the exposed
          pixels with the background color.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          NOTE(review): progress is reported against image->rows although
          this loop runs 'width' iterations (XShearImage reports against its
          loop count 'height') -- confirm the denominator is intentional.
        */
        proceed=SetImageProgress(image,YShearImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a shear_image copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image.  ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  /*
    Shear 'image' along the X and Y axes by the given angles (degrees),
    producing a parallelogram.  Exposed triangular regions are filled with
    the image background color.  Returns a newly allocated image, or NULL
    on failure (details reported via 'exception').
  */
  Image
    *integral_image,
    *shear_image;

  MagickBooleanType
    status;

  PointInfo
    shear;    /* shear slopes: tan() of the X and Y shear angles */

  RectangleInfo
    border_info,
    bounds;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    A non-zero shear angle that is a multiple of 90 degrees has an infinite
    tangent, so reject it up front.  (ThrowImageException reports the error
    and returns NULL from this function.)
  */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Negative X slope: convention of Paeth's shear decomposition. */
  shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
  shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);  /* no-op shear: return the clone unchanged */
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute image size.  bounds.x/bounds.y are the border margins needed so
    the sheared rows/columns stay inside the canvas.
  */
  bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
  bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
    image->columns)/2.0-0.5);
  bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
    image->rows)/2.0-0.5);
  /*
    Surround image with border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
  integral_image=DestroyImage(integral_image);  /* bordered copy owns pixels now */
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image: an X shear followed by a Y shear.
  */
  if (shear_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
  status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
    (ssize_t) (shear_image->rows-image->rows)/2,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
    (shear_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  /* Crop away the working border, keeping the sheared parallelogram. */
  status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
    image->columns,(MagickRealType) image->rows,MagickFalse,exception);
  /* NOTE(review): shear_image is dereferenced before the status check —
     assumes CropToFitImage() leaves the image pointer valid even on
     failure; TODO confirm. */
  shear_image->alpha_trait=image->alpha_trait;
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  if (status == MagickFalse)
    shear_image=DestroyImage(shear_image);
  return(shear_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. X axis. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. ShearRotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotation" by Alan W. Paeth. ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  /*
    Rotate 'image' by an arbitrary angle using Paeth's three-shear
    decomposition: an integral (quarter-turn) rotation followed by an
    X shear, a Y shear, and a final X shear.  Empty corners are filled
    with the image background color.  Returns a new image, or NULL on
    failure (details in 'exception').
  */
  Image
    *integral,
    *rotated;

  MagickBooleanType
    ok;

  MagickRealType
    theta;

  PointInfo
    slopes;

  RectangleInfo
    extent,
    frame;

  size_t
    post_width,
    quadrants,
    src_height,
    src_width;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Reduce the rotation to a quarter-turn count plus a residual angle in
    the range (-45, 45].
  */
  theta=fmod(degrees,360.0);
  if (theta < -45.0)
    theta+=360.0;
  quadrants=0;
  while (theta > 45.0)
    {
      theta-=90.0;
      quadrants++;
    }
  quadrants%=4;
  /*
    Perform the quarter-turn part, then derive the shear slopes for the
    residual angle.
  */
  integral=IntegralRotateImage(image,quadrants,exception);
  if (integral == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  slopes.x=(-tan((double) DegreesToRadians(theta)/2.0));
  slopes.y=sin((double) DegreesToRadians(theta));
  if ((slopes.x == 0.0) && (slopes.y == 0.0))
    return(integral);  /* exact multiple of 90 degrees: already done */
  if (SetImageStorageClass(integral,DirectClass,exception) == MagickFalse)
    {
      /* DestroyImage() yields NULL, so NULL is returned to the caller. */
      integral=DestroyImage(integral);
      return(integral);
    }
  if (integral->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral,OpaqueAlphaChannel,exception);
  /*
    Compute maximum bounds over the three shear passes.
  */
  src_width=integral->columns;
  src_height=integral->rows;
  extent.width=(size_t) floor(fabs((double) src_height*slopes.x)+src_width+
    0.5);
  extent.height=(size_t) floor(fabs((double) extent.width*slopes.y)+
    src_height+0.5);
  post_width=(size_t) floor(fabs((double) extent.height*slopes.x)+
    extent.width+0.5);
  extent.x=(ssize_t) floor((double) ((post_width > extent.width) ? src_width :
    extent.width-post_width+2)/2.0+0.5);
  extent.y=(ssize_t) floor(((double) extent.height-src_height+2)/2.0+0.5);
  /*
    Pad the image with a background-colored border large enough for the
    shears.
  */
  integral->border_color=integral->background_color;
  integral->compose=CopyCompositeOp;
  frame.width=(size_t) extent.x;
  frame.height=(size_t) extent.y;
  rotated=BorderImage(integral,&frame,image->compose,exception);
  integral=DestroyImage(integral);
  if (rotated == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Rotate via shear / shear / shear.
  */
  ok=XShearImage(rotated,slopes.x,src_width,src_height,extent.x,(ssize_t)
    (rotated->rows-src_height)/2,exception);
  if (ok == MagickFalse)
    {
      rotated=DestroyImage(rotated);
      return((Image *) NULL);
    }
  ok=YShearImage(rotated,slopes.y,extent.width,src_height,(ssize_t)
    (rotated->columns-extent.width)/2,extent.y,exception);
  if (ok == MagickFalse)
    {
      rotated=DestroyImage(rotated);
      return((Image *) NULL);
    }
  ok=XShearImage(rotated,slopes.x,extent.width,extent.height,(ssize_t)
    (rotated->columns-extent.width)/2,(ssize_t) (rotated->rows-
    extent.height)/2,exception);
  if (ok == MagickFalse)
    {
      rotated=DestroyImage(rotated);
      return((Image *) NULL);
    }
  /*
    Trim the working border and stamp the caller-visible attributes.
  */
  ok=CropToFitImage(&rotated,slopes.x,slopes.y,(MagickRealType) src_width,
    (MagickRealType) src_height,MagickTrue,exception);
  rotated->alpha_trait=image->alpha_trait;
  rotated->compose=image->compose;
  rotated->page.width=0;
  rotated->page.height=0;
  if (ok == MagickFalse)
    rotated=DestroyImage(rotated);
  return(rotated);
}
|
LAGraph_bfs_pushpull.c | //------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: push-pull breadth-first search
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search,
// contributed by Tim Davis, Texas A&M.
// LAGraph_bfs_pushpull computes the BFS of a graph from a single given
// source node. The result is a vector v where v(i)=k if node i was placed
// at level k in the BFS.
// Usage:
// info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ;
// GrB_Vector *v: a vector containing the result, created on output.
// v(i) = k is the BFS level of node i in the graph, where a source
// node has v(source)=1. v(i) is implicitly zero if it is unreachable
// from the source node. That is, GrB_Vector_nvals (&nreach,v) is the
// size of the reachable set of the source node, for a single-source
// BFS. v may be returned as sparse, or full. If full, v(i)=0
// indicates that node i was not reached. If sparse, the pattern of v
// indicates the set of nodes reached.
// GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing.
// pi(source) = source+1 for source node. pi(i) = p+1 if p is the
// parent of i. If pi is sparse, and pi(i) is not present, then node
// i has not been reached. Otherwise, if pi is full, then pi(i)=0
// indicates that node i was not reached.
// GrB_Matrix A: a square matrix of any type. The values of A are not
// accessed. The presence of the entry A(i,j) indicates the edge
// (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge.
// GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm
// is a conventional push-only BFS. If not NULL, AT must be the
// transpose of A, and a push-pull algorithm is used (NOTE: this
// assumes GraphBLAS stores its matrix in CSR form; see discussion
// below). Results are undefined if AT is not NULL but not identical
// to the transpose of A.
// int64_t source: the source node for the BFS.
// int64_t max_level: An optional limit on the levels searched for the
// single-source BFS. If zero, then no limit is enforced. If > 0,
// then only nodes with v(i) <= max_level will be visited. That is:
// 1: just the source node, 2: the source and its neighbors, 3: the
// source node, its neighbors, and their neighbors, etc.
// bool vsparse: if the result v may remain very sparse, then set this
// parameter to true. If v might have many entries, set it false. If
// you are unsure, then set it to true. This parameter speeds up
// the handling of v. If you guess wrong, there is a slight
// performance penalty. The results are not affected by this
// parameter, just the performance. This parameter is used only for
// the single-source BFS.
// single-source BFS:
// Given a graph A, a source node, find all nodes reachable from the
// source node. v(source)=1, v(i)=2 if edge (source,i) appears in the
// graph, and so on. If node i is not reachable from source, then
// implicitly v(i)=0. v is returned as a sparse vector, and v(i) is not
// an entry in this vector.
// This algorithm can use the push-pull strategy, which requires both A and
// AT=A' to be passed in. If the graph is known to be symmetric, then the same
// matrix A can be passed in for both arguments. Results are undefined if AT
// is not the transpose of A.
// If only A or AT is passed in, then only single strategy will be used: push
// or pull, but not both. In general, push-only performs well. A pull-only
// strategy is possible but it is exceedingly slow. Assuming A and AT are both
// in CSR format, then (let s = source node):
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!)
// If A and AT are both in CSC format, then:
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!)
// Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS
// detects this case and refuses to do it.
// The basic step of this algorithm computes A'*q where q is the 'queue' of
// nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' =
// A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing,
// just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is
// simultaneously a row and column vector, so q and q' are interchangeable.
// To implement an efficient BFS using GraphBLAS, an assumption must be made in
// LAGraph about how the matrix is stored, whether by row or by column (or
// perhaps some other opaque data structure). The storage format has a huge
// impact on the relative performance of vxm(q,A) and mxv(AT,q).
// Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily
// accessible. In terms of the graph A, this means that the out-adjacency
// list of node i can be traversed in time O(out-degree of node i).
// If AT is stored by row, then AT(i,:) is the in-adjacency list of node i,
// and traversing row i of AT can be done in O(in-degree of node i) time.
// The CSR (Compressed Sparse Row) format is the default for
// SuiteSparse:GraphBLAS, but no assumption can be made about any particular
// GraphBLAS library implementation.
// If A and AT are both stored by column instead, then A(i,:) is not easy to
// access. Instead, A(:,i) is the easily-accessible in-adjacency of node i,
// and AT(:,i) is the out-adjacency.
// A push step requires the out-adjacencies of each node, where as
// a pull step requires the in-adjacencies of each node.
// vxm(q,A) = A'*q, with A stored by row: a push step
// mxv(AT,q) = A'*q, with AT stored by row: a pull step
// vxm(q,A) = A'*q, with A stored by col: a pull step
// mxv(AT,q) = A'*q, with AT stored by col: a push step
// The GraphBLAS data structure is opaque. An implementation may decide to
// store the matrix A in both formats, internally, so that it easily traverse
// both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i)
// can both be easily traversed). This would make a push-pull BFS easy to
// implement using just the opaque GrB_Matrix A, but it doubles the storage.
// Deciding which format to use automatically is not a simple task,
// particularly since the decision must work well throughout GraphBLAS, not
// just for the BFS.
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column).
// As a result, the MATLAB expression x=AT*q is a push step, computed using a
// saxpy-based algorithm internally, and x=A'*q is a pull step, computed using
// a dot product.
// SuiteSparse:GraphBLAS can store a matrix in either format, but this requires
// an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)). where
// f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library
// could be augmented in the future with f = Gxb_BY_BOTH. It currently does
// not select the format automatically. As a result, if GxB_set is not used,
// all its GrB_Matrix objects are stored by row (CSR).
// SuiteSparse:GraphBLAS allows the user to query (via GxB_get) and set (via
// GxB_set) the format, whether by row or by column. The hypersparsity of
// A is selected automatically, with optional hints from the user application,
// but a selection between hypersparsity vs standard CSR and CSC has no effect
// on the push vs pull decision made here.
// The push/pull and saxpy/dot connection can be described as follows.
// Assume for these first two examples that MATLAB stores its matrices in CSR
// format, where accessing A(i,:) is fast.
// If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB
// notation as:
/*
function x = vxm (q,A)
% a push step: compute x = q'*A where q is a column vector
x = sparse (1,n)
for i = 1:n
% a saxpy operation, using the ith row of A and the scalar q(i)
x = x + q (i) * A (i,:)
end
*/
// If AT is stored by row, then x = mvx(AT,q) = AT*q = A'*q becomes
// a dot product:
/*
function x = mxv (AT,q)
% a pull step: compute x = AT*q where q is a column vector
for i = 1:n
% a dot-product of the ith row of AT and the column vector q
x (i) = AT (i,:) * q
end
*/
// The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and
// mxv(AT,q) by default, where A and AT are stored by row by default. However,
// they would be very slow in MATLAB, since it stores its sparse matrices in
// CSC format. In that case, if A is stored by column and thus accessing
// A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product
// instead. These two snippets assume the matrices are both in CSC format, and
// thus make more efficient use of MATLAB:
/*
function x = vxm (q,A)
% a pull step: compute x = q'*A where q is a column vector
for j = 1:n
% a dot product of the row vector q' and the jth column of A
x (j) = q' * A (:,j)
end
*/
// If AT is stored by column, then x = mvx(AT,q) is
/*
function x = mxv (AT,q)
% a push step: compute x = AT*q where q is a column vector
for j = 1:n
% a saxpy operation, using the jth column of AT and the scalar q(i)
x = x + AT (:,j) * q
end
*/
// In MATLAB, if q is a sparse column vector and A is a sparse matrix, then
// x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a
// dot product. You can view the code used internally in MATLAB for its sparse
// matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT
// packages, at http://suitesparse.com.
// This raises an interesting puzzle for LAGraph, which is intended on being a
// graph library that can be run on any implementation of GraphBLAS. There are
// no mechanisms in the GraphBLAS C API for LAGraph (or other external packages
// or user applications) to provide hints to GraphBLAS. Likely, there are no
// query mechanisms where LAGraph can ask GraphBLAS how its matrices might be
// stored (LAGraphs asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer
// from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it
// does not answer this query.
// There are two solutions to this puzzle. The most elegant one is for
// GraphBLAS to handle all this internally, and change formats as needed. It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS. This is not a simple task since the API is
// complex. Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).
// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.
// The second solution is to allow the user application or library such as
// LAGraph to provide hints and allow it to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.
// SuiteSparse:GraphBLAS takes the second approach: It adds two functions that
// are extensions to the API: GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format. Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically. By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically). So if no GxB_* extensions are used, all matrices
// are in CSR format.
// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least A(i,:) and AT(i,:) can be easily accessed. With this assumption, it
// is the responsibility of this function to select between using a push or a
// pull, for each step in the BFS.
// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value. This cuts the time for the
// dot-product. Not all GraphBLAS libraries may use this, but SuiteSparse:
// GraphBLAS does (in version 2.3.0 and later). Early termination cannot be
// done for the saxpy (push step) method.
// The work done by the push method (saxpy) is very predictable. BFS uses a
// complemented mask. There is no simple way to exploit a complemented mask,
// and saxpy has no early termination rule. If the set of nodes in the current
// level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree,
// this becomes d*nq where nq = length (q):
// pushwork = d*nq
// The work done by the pull (dot product) method is less predictable. It can
// exploit the complemented mask, and so it only computes (n-nvisited) dot
// products, if nvisited is the # of nodes visited so far (in all levels).
// With no early-termination, the dot product will take d * log2 (nq) time,
// assuming that q is large and a binary search is used internally. That is,
// the dot product will scan through the d entries in A(i,:), and do a binary
// search for each entry in q. To account for the higher constant of a binary
// search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination,
// d is too high. If the nodes are randomly marked, the probability of each
// node being marked is nvisited/n. The expected number of trials until
// success, for a sequence of events with probability p, is 1/p. Thus, the
// expected number of iterations in a dot product before an early termination
// is 1/p = (n/nvisited+1), where +1 is added to avoid a divide by zero.
// However, it cannot exceed d. Thus, the total work for the dot product
// (pull) method can be estimated as:
// per_dot = min (d, n / (nvisited+1))
// pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq)))
// The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later,
// and may be reasonable for other GraphBLAS implementations. Push or pull
// is selected as the one with the least work.
// TODO: change the formula for v3.2.0
// The push/pull decision requires that both A and AT be passed in, but this
// function can use just one or the other. If only A is passed in and AT is
// NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull
// step if A is CSC). If only AT is passed in and A is NULL, then only
// mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is
// CSC).
// In general, while a push-pull strategy is the fastest, a push-only BFS will
// give good performance. In particular, the time to compute AT=A' plus the
// time for the push-pull BFS is typically higher than just a push-only BFS.
// This is why this function does not compute AT=A'. To take advantage of the
// push-pull method, both A and AT must already be available, with the cost to
// construct them amortized across other computations such as this one.
// A pull-only strategy will be *exceedingly* slow.
// The input matrix A must be square. It can be non-binary, but best
// performance will be obtained if it is GrB_BOOL. It can have explicit
// entries equal to zero. These are safely ignored, and are treated as
// non-edges.
// SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs.
// In this case, if both matrices are provided, they must be in the same
// format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC
// format, vxm(q,A) is the pull step and mxv(AT,q) is the push step.
// If only A or AT are provided, and the result is a pull-only algorithm,
// an error is returned.
// References:
// Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull
// Efficiently in GraphBLAS. In Proceedings of the 47th International
// Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA,
// Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122
// Scott Beamer, Krste Asanovic and David A. Patterson,
// The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015.
// http://gap.cs.berkeley.edu/
#include "LAGraph_bfs_pushpull.h"
#include "../configuration/config.h"
// Free all BFS workspace vectors.  GrB_free is a no-op on a NULL handle,
// so this is safe to invoke at any point, with any subset already freed.
#define LAGRAPH_FREE_ALL \
{ \
    GrB_free (&v) ; \
    GrB_free (&t) ; \
    GrB_free (&q) ; \
    GrB_free (&pi) ; \
}

// Report an error to stderr (with file/line), free all workspace, and
// return 'info' from the enclosing function.
#define LAGRAPH_ERROR(message,info) \
{ \
    fprintf (stderr, "LAGraph error: %s\n[%d]\nFile: %s Line: %d\n", \
        message, info, __FILE__, __LINE__) ; \
    LAGRAPH_FREE_ALL ; \
    return (info) ; \
}

// Max/min of two values.  Classic macros: an argument may be evaluated
// twice, so avoid side effects in x and y.
#define LAGRAPH_MAX(x,y) (((x) > (y)) ? (x) : (y))
#define LAGRAPH_MIN(x,y) (((x) < (y)) ? (x) : (y))
GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL
(
GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph
GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i.
// if NULL, the parent is not computed.
GrB_Matrix A, // input graph, treated as if boolean in semiring
GrB_Matrix AT, // transpose of A (optional; push-only if NULL)
int64_t source, // starting node of the BFS
int64_t *dest, // optional destination node of the BFS
int64_t max_level, // optional limit of # levels to search
bool vsparse // if true, v is expected to be very sparse
) {
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Vector q = NULL ; // nodes visited at each level
GrB_Vector v = NULL ; // result vector
GrB_Vector t = NULL ; // temporary vector
GrB_Vector pi = NULL ; // parent vector
if(v_output == NULL || (A == NULL && AT == NULL)) {
// required output argument is missing
LAGRAPH_ERROR("required arguments are NULL", GrB_NULL_POINTER) ;
}
(*v_output) = NULL ;
bool compute_tree = (pi_output != NULL) ;
GrB_Index nrows, ncols, nvalA, ignore, nvals ;
// A is provided. AT may or may not be provided
GrB_Matrix_nrows(&nrows, A) ;
GrB_Matrix_ncols(&ncols, A) ;
GrB_Matrix_nvals(&nvalA, A) ;
bool use_vxm_with_A = true ;
// push/pull requires both A and AT
bool push_pull = (A != NULL && AT != NULL) ;
if(nrows != ncols) {
// A must be square
LAGRAPH_ERROR("A must be square", GrB_NULL_POINTER) ;
}
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
GrB_Index n = nrows ;
int nthreads;
Config_Option_get(Config_OPENMP_NTHREAD, &nthreads);
nthreads = LAGRAPH_MIN(n / 4096, nthreads) ;
nthreads = LAGRAPH_MAX(nthreads, 1) ;
// just traverse from the source node
max_level = (max_level <= 0) ? n : LAGRAPH_MIN(n, max_level) ;
// create an empty vector v
GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ;
GrB_Vector_new(&v, int_type, n) ;
// make v dense if requested
int64_t vlimit = LAGRAPH_MAX(256, sqrt((double) n)) ;
if(!vsparse) {
// v is expected to have many entries, so convert v to dense.
// If the guess is wrong, v can be made dense later on.
GrB_assign(v, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
// create a scalar to hold the destination value
GrB_Index dest_val ;
GrB_Semiring first_semiring, second_semiring ;
if(compute_tree) {
// create an integer vector q, and set q(source) to source+1
GrB_Vector_new(&q, int_type, n) ;
GrB_Vector_setElement(q, source + 1, source) ;
if(n > INT32_MAX) {
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT64 ;
second_semiring = GxB_ANY_SECOND_INT64 ;
} else {
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT32 ;
second_semiring = GxB_ANY_SECOND_INT32 ;
}
// create the empty parent vector
GrB_Vector_new(&pi, int_type, n) ;
if(!vsparse) {
// make pi a dense vector of all zeros
GrB_assign(pi, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
// pi (source) = source+1 denotes a root of the BFS tree
GrB_Vector_setElement(pi, source + 1, source) ;
} else {
// create a boolean vector q, and set q(source) to true
GrB_Vector_new(&q, GrB_BOOL, n) ;
GrB_Vector_setElement(q, true, source) ;
// terminates as soon as it finds any pair
first_semiring = GxB_ANY_PAIR_BOOL ;
second_semiring = GxB_ANY_PAIR_BOOL ;
}
// average node degree
double d = (n == 0) ? 0 : (((double) nvalA) / (double) n) ;
int64_t nvisited = 0 ; // # nodes visited so far
GrB_Index nq = 1 ; // number of nodes in the current level
//--------------------------------------------------------------------------
// BFS traversal and label the nodes
//--------------------------------------------------------------------------
for(int64_t level = 1 ; ; level++) {
//----------------------------------------------------------------------
// set v to the current level, for all nodes in q
//----------------------------------------------------------------------
// v<q> = level: set v(i) = level for all nodes i in q
GrB_assign(v, q, NULL, level, GrB_ALL, n, GrB_DESC_S) ;
//----------------------------------------------------------------------
// check if done
//----------------------------------------------------------------------
nvisited += nq ;
if(nq == 0 || nvisited == n || level >= max_level) break ;
//----------------------------------------------------------------------
// check if destination has been reached, if one is provided
//----------------------------------------------------------------------
if(dest) {
GrB_Info res = GrB_Vector_extractElement(&dest_val, v, *dest) ;
if(res != GrB_NO_VALUE) break ;
}
//----------------------------------------------------------------------
// check if v should be converted to dense
//----------------------------------------------------------------------
if(vsparse && nvisited > vlimit) {
// Convert v from sparse to dense to speed up the rest of the work.
// If this case is triggered, it would have been faster to pass in
// vsparse = false on input.
// v <!v> = 0
GrB_assign(v, v, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ;
GrB_Vector_nvals(&ignore, v) ;
if(compute_tree) {
// Convert pi from sparse to dense, to speed up the work.
// pi<!pi> = 0
GrB_assign(pi, pi, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ;
GrB_Vector_nvals(&ignore, pi) ;
}
vsparse = false ;
}
//----------------------------------------------------------------------
// select push vs pull
//----------------------------------------------------------------------
if(push_pull) {
double pushwork = d * nq ;
double expected = (double) n / (double)(nvisited + 1) ;
double per_dot = LAGRAPH_MIN(d, expected) ;
double binarysearch = (3 * (1 + log2((double) nq))) ;
double pullwork = (n - nvisited) * per_dot * binarysearch ;
use_vxm_with_A = (pushwork < pullwork) ;
}
//----------------------------------------------------------------------
// q = next level of the BFS
//----------------------------------------------------------------------
if(use_vxm_with_A) {
// q'<!v> = q'*A
// this is a push step if A is in CSR format; pull if CSC
GrB_vxm(q, v, NULL, first_semiring, q, A, GrB_DESC_RC) ;
} else {
// q<!v> = AT*q
// this is a pull step if AT is in CSR format; push if CSC
GrB_mxv(q, v, NULL, second_semiring, AT, q, GrB_DESC_RC) ;
}
//----------------------------------------------------------------------
// move to next level
//----------------------------------------------------------------------
if(compute_tree) {
//------------------------------------------------------------------
// assign parents
//------------------------------------------------------------------
// q(i) currently contains the parent of node i in tree (off by one
// so it won't have any zero values, for valued mask).
// pi<q> = q
GrB_assign(pi, q, NULL, q, GrB_ALL, n, GrB_DESC_S) ;
//------------------------------------------------------------------
// replace q with current node numbers
//------------------------------------------------------------------
// TODO this could be a unaryop
// q(i) = i+1 for all entries in q.
GrB_Index *qi ;
bool jumbled ;
int64_t q_size ;
GrB_Index qi_size, qx_size ;
if(n > INT32_MAX) {
int64_t *qx ;
GxB_Vector_export_CSC(&q, &int_type, &n,
&qi, (void **) (&qx), &qi_size, &qx_size, &nq,
&jumbled, NULL) ;
int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ;
nth = LAGRAPH_MAX(nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for(int64_t k = 0 ; k < nq ; k++) {
qx [k] = qi [k] + 1 ;
}
GxB_Vector_import_CSC(&q, int_type, n,
&qi, (void **) (&qx), qi_size, qx_size, nq,
jumbled, NULL) ;
} else {
int32_t *qx ;
GxB_Vector_export_CSC(&q, &int_type, &n,
&qi, (void **) (&qx), &qi_size, &qx_size, &nq,
&jumbled, NULL) ;
int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ;
nth = LAGRAPH_MAX(nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for(int32_t k = 0 ; k < nq ; k++) {
qx [k] = qi [k] + 1 ;
}
GxB_Vector_import_CSC(&q, int_type, n,
&qi, (void **) (&qx), qi_size, qx_size, nq,
jumbled, NULL) ;
}
} else {
//------------------------------------------------------------------
// count the nodes in the current level
//------------------------------------------------------------------
GrB_Vector_nvals(&nq, q) ;
}
}
//--------------------------------------------------------------------------
// return the parent vector, if computed
//--------------------------------------------------------------------------
if(compute_tree) {
(*pi_output) = pi ;
pi = NULL ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
(*v_output) = v ; // return result
v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it
LAGRAPH_FREE_ALL ; // free all workspace (except for result v)
return (GrB_SUCCESS) ;
}
|
simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd foo
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd safelen(4)
// Clause-free '#pragma omp simd': accepted on a for loop; diagnosed when the
// following statement is not a for loop. (The comment directives below are
// clang -verify assertions and must stay exactly where they are.)
void test_no_clause() {
int i;
#pragma omp simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp simd' must be a for loop}}
#pragma omp simd
++i;
}
// Branches may not enter or leave the protected scope of a simd region:
// goto across the region boundary and return from inside are diagnosed,
// while a goto whose label is inside the same region (L2) is allowed.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// Unknown tokens after the directive name produce an "extra tokens" warning
// and are otherwise ignored.
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd foo bar
for (i = 0; i < 16; ++i)
;
}
// Non-identifier trailing tokens (';' and ',') after the directive or after a
// clause: each is reported as ignored extra tokens; 'firstprivate' is
// additionally rejected as an unsupported clause on simd in this test setup.
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd;
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd firstprivate(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// Exercises parsing and semantic checks of the 'safelen' clause: malformed
// parentheses/argument lists, non-constant and non-positive arguments.
// (The 'xxpected' spelling below deliberately disables that directive.)
void test_safelen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, )
for (i = 0; i < 16; ++i)
;
// xxpected-error@+1 {{expected expression}}
#pragma omp simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// Mirrors test_safelen for the 'simdlen' clause: same parse-error shapes and
// the same strictly-positive integer-constant requirement on the argument.
void test_simdlen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp simd simdlen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd simdlen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp simd simdlen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp simd simdlen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// Cross-clause constraint: simdlen must be <= safelen, regardless of the
// order in which the two clauses appear.
void test_safelen_simdlen() {
int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
// Exercises the 'collapse' clause: parse errors, the strictly-positive
// integer-constant requirement, the "expected N for loops" check against the
// actual nest depth, and interaction with reduction in nested regions.
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp simd collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp simd collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
#pragma omp simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp simd collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp simd collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as reduction}}
#pragma omp parallel
#pragma omp simd collapse(2) reduction(+ : i)
for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
#pragma omp parallel
#pragma omp for
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
#pragma omp simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// Exercises the 'linear' clause: argument-list parse errors, undeclared
// names, ':step' syntax, zero-step warning, and mutual exclusion with
// linear/private/lastprivate on the same variable.
void test_linear() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp simd linear(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd linear()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd linear(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd linear(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp simd linear(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp simd linear(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp simd linear(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp simd linear(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp simd linear(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp simd linear(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp simd private(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp simd linear(x) private(x)
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp simd linear(x, y : 0)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp simd linear(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp simd lastprivate(x) linear(x)
for (i = 0; i < 16; ++i)
;
}
// Exercises the 'aligned' clause: parse errors, undeclared names, ':align'
// syntax, the array-or-pointer argument-type requirement, and the rule that
// a variable may appear in at most one aligned clause.
void test_aligned() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd aligned(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp simd aligned(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
// Exercises the 'private' clause: malformed argument lists, non-variable
// arguments, and valid single/multi-variable forms.
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp simd private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp simd private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// 'firstprivate' is rejected on '#pragma omp simd' in this test setup; a
// malformed argument list piles parse errors on top of the clause error.
void test_firstprivate() {
int i;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}}
// expected-error@+1 {{expected expression}}
#pragma omp simd firstprivate(
for (i = 0; i < 16; ++i)
;
}
// Exercises the 'lastprivate' clause: malformed argument lists, non-variable
// arguments, and valid single/multi-variable forms.
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp simd lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Exercises the 'reduction' clause: missing/invalid reduction identifiers,
// missing ':' recovery, all supported operators (+ * - & | ^ && || max min),
// and the variable-name-only restriction on list items.
void test_reduction() {
int i, x, y;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction()
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected identifier}}
#pragma omp simd reduction( : x)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(,
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected expression}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(+
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
//
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+:
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ :, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ : x, + : y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected identifier}}
#pragma omp simd reduction(% : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(+ : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(* : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(- : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(& : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(| : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(^ : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(&& : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(|| : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(max : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(min : x)
for (i = 0; i < 16; ++i)
;
struct X {
int x;
};
struct X X;
// expected-error@+1 {{expected variable name}}
#pragma omp simd reduction(+ : X.x)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd reduction(+ : x + x)
for (i = 0; i < 16; ++i)
;
}
// Loop-variable type checking: the simd loop iteration variable must be of
// integer or pointer type, so float/double induction variables are rejected.
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
// Linear-clause modifiers on simd: only 'val' is accepted; 'uval', 'ref',
// and unknown modifiers are diagnosed.
void linear_modifiers(int argc) {
int f;
#pragma omp simd linear(f)
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear(val(f))
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear(uval(f)) // expected-error {{expected 'val' modifier}}
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear(ref(f)) // expected-error {{expected 'val' modifier}}
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear(foo(f)) // expected-error {{expected 'val' modifier}}
for (int k = 0; k < argc; ++k) ++k;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * NOTE: *Y is normalized in place as part of the borrow handling, so the
 * caller's Y may be modified. Returns 1 when the difference is negative,
 * 0 otherwise. (Classic GNU libc elapsed-time idiom.)
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y when x has fewer microseconds. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds (beyond one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative difference iff x's (adjusted) seconds are smaller. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 32;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,4);t1++) {
lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(4*t1+Ny+5,32)),floord(8*t2+Ny+4,32)),floord(8*t1-8*t2+Nz+Ny+3,32));t3++) {
for (t4=max(max(max(0,ceild(t1-511,512)),ceild(8*t2-Nz-2044,2048)),ceild(32*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(4*t1+Nx+5,2048)),floord(8*t2+Nx+4,2048)),floord(32*t3+Nx+28,2048)),floord(8*t1-8*t2+Nz+Nx+3,2048));t4++) {
for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),32*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),32*t3+30),2048*t4+2046),8*t1-8*t2+Nz+5);t5++) {
for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (disabled: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
symv_x_dia_u_lo_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Symmetric matrix-vector product y := beta*y + alpha*op(A)*x for a DIA
 * matrix storing the lower triangle with an implicit unit diagonal, where
 * op(.) applies conjugation (COMPLEX builds only; real builds reject the
 * call). Each OpenMP thread accumulates its diagonals' contributions into a
 * private length-m buffer to avoid write races; the buffers are then reduced
 * into y in a second parallel pass. */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
#ifdef COMPLEX
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
/* a symmetric kernel only makes sense for a square matrix */
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
const ALPHA_INT thread_num = alpha_get_thread_num();
/* one zero-initialized accumulator per thread
 * NOTE(review): malloc results are unchecked — confirm against the
 * allocation-failure policy used elsewhere in the library */
ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
for(int i = 0; i < thread_num; ++i)
{
tmp[i] = malloc(sizeof(ALPHA_Number) * m);
memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
}
const ALPHA_INT diags = A->ndiag;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < diags; ++i)
{
const ALPHA_INT threadId = alpha_get_thread_id();
const ALPHA_INT dis = A->distance[i];
/* only strictly-lower diagonals carry stored data: dis == 0 is the
 * implicit unit diagonal and dis > 0 (upper) is ignored for "lo" layout */
if(dis < 0)
{
const ALPHA_INT row_start = -dis;
const ALPHA_INT col_start = 0;
const ALPHA_INT nnz = m + dis;
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < nnz; ++j)
{
ALPHA_Number v;
/* v = alpha * conj(a(row,col)) — presumably what alpha_mul_3c does;
 * confirm against the macro definition */
alpha_mul_3c(v, alpha, A->values[start + row_start + j]);
/* symmetric entry contributes to both mirrored positions */
alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]);
}
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
/* y[i] = beta*y[i] + alpha*x[i] (unit diagonal) + sum of thread partials */
alpha_mul(y[i], beta, y[i]);
alpha_madde(y[i], alpha, x[i]);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(y[i], y[i], tmp[j][i]);
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
#else
return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}
/* Public entry point. Dispatches to the OpenMP implementation when complex
 * arithmetic is compiled in; real-typed builds reject the call. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_DIA* A,
      const ALPHA_Number* x,
      const ALPHA_Number beta,
      ALPHA_Number* y)
{
#ifndef COMPLEX
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#else
    return ONAME_omp(alpha, A, x, beta, y);
#endif
}
|
mkl_convolution-inl.h | /*******************************************************************************
* Copyright 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file mkl_convolution-inl.h
* \brief
* \author lingyan.guo@intel.com
* zhenlin.luo@intel.com
*
*******************************************************************************/
#ifndef MXNET_OPERATOR_MKL_MKL_CONVOLUTION_INL_H_
#define MXNET_OPERATOR_MKL_MKL_CONVOLUTION_INL_H_
#include <mxnet/storage.h>
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "../operator_common.h"
#include "../convolution-inl.h"
#include "./mkl_util-inl.h"
namespace mxnet {
namespace op {
template<typename xpu, typename DType>
class MKLConvolutionOp : public Operator {
public:
// Human-readable operator name; used only to label debug buffers.
static std::string getName() {
  return std::string("MKLConvolutionOp");
}
// Resets all MKL primitives to null and (re)creates the MKLData wrappers
// used by the forward and backward passes. The wrapper names exist purely
// for debugging output.
void SetupBuffer() {
  convolutionFwd = static_cast<dnnPrimitive_t>(NULL);
  convolutionBwdData = static_cast<dnnPrimitive_t>(NULL);
  convolutionBwdFilter = static_cast<dnnPrimitive_t>(NULL);
  convolutionBwdBias = static_cast<dnnPrimitive_t>(NULL);
  // Forward-pass buffers.
  fwd_bottom_data = MKLData<DType>::create();
  fwd_top_data = MKLData<DType>::create();
  fwd_filter_data = MKLData<DType>::create();
  fwd_bias_data = MKLData<DType>::create();
  // Backward-by-data buffers.
  bwdd_top_diff = MKLData<DType>::create();
  bwdd_bottom_diff = MKLData<DType>::create();
  bwdd_filter_data = MKLData<DType>::create();
  // Backward-by-filter buffers.
  bwdf_top_diff = MKLData<DType>::create();
  bwdf_filter_diff = MKLData<DType>::create();
  bwdf_bottom_data = MKLData<DType>::create();
  // Backward-by-bias buffers.
  bwdb_top_diff = MKLData<DType>::create();
  bwdb_bias_diff = MKLData<DType>::create();
  // Debug labels: "<buffer> @ <operator name>".
  const std::string suffix = " @ " + this->getName();
  fwd_bottom_data->name = "fwd_bottom_data" + suffix;
  fwd_top_data->name = "fwd_top_data" + suffix;
  fwd_filter_data->name = "fwd_filter_data" + suffix;
  fwd_bias_data->name = "fwd_bias_data" + suffix;
  bwdd_top_diff->name = "bwdd_top_diff" + suffix;
  bwdd_bottom_diff->name = "bwdd_bottom_diff" + suffix;
  bwdd_filter_data->name = "bwdd_filter_data" + suffix;
  bwdf_top_diff->name = "bwdf_top_diff" + suffix;
  bwdf_bottom_data->name = "bwdf_bottom_data" + suffix;
  bwdf_filter_diff->name = "bwdf_filter_diff" + suffix;
  bwdb_top_diff->name = "bwdb_top_diff" + suffix;
  bwdb_bias_diff->name = "bwdb_bias_diff" + suffix;
}
// Builds the operator from its parameters. MKL primitives start out null;
// they are created lazily by LayerSetUp once the tensor shapes are known.
explicit MKLConvolutionOp(ConvolutionParam p)
    : convolutionFwd(static_cast<dnnPrimitive_t>(NULL)),
      convolutionBwdData(static_cast<dnnPrimitive_t>(NULL)),
      convolutionBwdFilter(static_cast<dnnPrimitive_t>(NULL)),
      convolutionBwdBias(static_cast<dnnPrimitive_t>(NULL)) {
  this->param_ = p;
  this->init_mkldnn_ = false;
  // Workspace limit: MBytes -> bytes -> number of DType elements.
  param_.workspace = (param_.workspace << 20) / sizeof(DType);
  SetupBuffer();
}
// Destroys whichever MKL primitives were actually created. Null handles are
// skipped, and the bias primitive only exists when bias is enabled.
void ReleaseBuffer() {
  if (convolutionFwd != NULL) {
    dnnDelete<DType>(convolutionFwd);
    convolutionFwd = NULL;
  }
  if (convolutionBwdData != NULL) {
    dnnDelete<DType>(convolutionBwdData);
    convolutionBwdData = NULL;
  }
  if (convolutionBwdFilter != NULL) {
    dnnDelete<DType>(convolutionBwdFilter);
    convolutionBwdFilter = NULL;
  }
  if (!param_.no_bias && convolutionBwdBias != NULL) {
    dnnDelete<DType>(convolutionBwdBias);
    convolutionBwdBias = NULL;
  }
}
// Releases all MKL primitives on destruction.
virtual ~MKLConvolutionOp() { ReleaseBuffer(); }
private:
// Caches geometry from the input/output tensors and creates every MKL DNN
// convolution primitive used later (forward, backward-data, backward-filter
// and, when bias is enabled, backward-bias) together with the layout
// descriptors for each resource. Must run once before Forward/Backward.
// Fix vs. original: the redundant second `oc = this->num_output_;` dead
// store has been removed.
void LayerSetUp(const mshadow::Tensor<xpu, 4, DType> &data,
                const mshadow::Tensor<xpu, 4, DType> &out) {
  // NCHW geometry of input and output.
  this->width_ = data.shape_[3];
  this->height_ = data.shape_[2];
  this->channels_ = data.shape_[1];
  this->num_ = data.shape_[0];
  this->group_ = param_.num_group;
  this->width_out_ = out.shape_[3];
  this->height_out_ = out.shape_[2];
  int channel_out_ = out.shape_[1];
  this->num_output_ = channel_out_;
  kernel_w_ = param_.kernel[1];
  kernel_h_ = param_.kernel[0];
  stride_w_ = param_.stride[1];
  stride_h_ = param_.stride[0];
  pad_w_ = param_.pad[1];
  pad_h_ = param_.pad[0];
  int status;
  size_t n, g;
  size_t iw, ih, ic;
  size_t ow, oh, oc;
  size_t kw, kh;
  size_t dimension = 4;
  g = std::max(this->group_, 1);
  n = this->num_;
  iw = this->width_;
  ih = this->height_;
  ic = this->channels_;
  ow = this->width_out_;
  oh = this->height_out_;
  oc = this->num_output_;
  kw = this->kernel_w_;
  kh = this->kernel_h_;
  // MKL layouts list sizes/strides from the innermost dimension outwards.
  size_t bdata_sizes[4] = { iw, ih, ic, n };
  size_t bdata_strides[4] = { 1, iw, iw*ih, iw*ih*ic };
  /* starting with MKL 2017 Gold in case of groups filter layout
   * becomes 5D, i.e. groups become a separate dimension */
  size_t g_mkl2017 = g;
  size_t f_dimension = dimension + (g != 1);
  if (getMKLBuildDate() < 20160701) {
    g_mkl2017 = 1;
    f_dimension = dimension;
  }
  size_t fdata_sizes[5] = { kw, kh, ic / g, oc / g_mkl2017, g_mkl2017 };
  size_t fdata_strides[5] = { 1, kw, kw*kh, kw*kh*ic / g, kw*kh*ic / g*oc / g };
  size_t bias_sizes[1] = { oc };
  size_t bias_strides[1] = { 1 };
  size_t tdata_sizes[4] = { ow, oh, oc, n };
  size_t tdata_strides[4] = { 1, ow, ow*oh, ow*oh*oc };
  size_t convolutionStrides[2] = { this->stride_w_, this->stride_h_ };
  // Negative offsets express zero-padding around the input.
  int inputOffset[2] = { -this->pad_w_, -this->pad_h_ };
  /*** forward convolution primitive (with or without fused bias) ***/
  if (!param_.no_bias) {
    status = dnnGroupsConvolutionCreateForwardBias<DType>(&convolutionFwd,
                                                          NULL,
                                                          dnnAlgorithmConvolutionDirect,
                                                          g,
                                                          dimension,
                                                          bdata_sizes,
                                                          tdata_sizes,
                                                          fdata_sizes,
                                                          convolutionStrides,
                                                          inputOffset,
                                                          dnnBorderZeros);
  } else {
    status = dnnGroupsConvolutionCreateForward<DType>(&convolutionFwd,
                                                      NULL,
                                                      dnnAlgorithmConvolutionDirect,
                                                      g,
                                                      dimension,
                                                      bdata_sizes,
                                                      tdata_sizes,
                                                      fdata_sizes,
                                                      convolutionStrides,
                                                      inputOffset,
                                                      dnnBorderZeros);
  }
  CHECK_EQ(status, 0)
      << "Failed dnnCreateConvolution<DType>(dnnForward) with status "
      << status << "\n";
  fwd_bottom_data->create_layouts(convolutionFwd, dnnResourceSrc, dimension,
                                  bdata_sizes, bdata_strides);
  fwd_top_data->create_layouts(convolutionFwd, dnnResourceDst, dimension,
                               tdata_sizes, tdata_strides);
  fwd_filter_data->create_layouts(convolutionFwd, dnnResourceFilter,
                                  f_dimension, fdata_sizes, fdata_strides);
  if (!param_.no_bias)
    fwd_bias_data->create_layouts(convolutionFwd, dnnResourceBias, 1,
                                  bias_sizes, bias_strides);
  /*
   * Backward by data layer setup
   */
  status = dnnGroupsConvolutionCreateBackwardData<DType>(&convolutionBwdData,
                                                         NULL,
                                                         dnnAlgorithmConvolutionDirect,
                                                         g,
                                                         dimension,
                                                         bdata_sizes,
                                                         tdata_sizes,
                                                         fdata_sizes,
                                                         convolutionStrides,
                                                         inputOffset,
                                                         dnnBorderZeros);
  CHECK_EQ(status, 0)
      << "Failed dnnConvolutionCreateBackwardData with status "
      << status << "\n";
  bwdd_bottom_diff->create_layouts(convolutionBwdData, dnnResourceDiffSrc,
                                   dimension, bdata_sizes, bdata_strides);
  bwdd_top_diff->create_layouts(convolutionBwdData, dnnResourceDiffDst,
                                dimension, tdata_sizes, tdata_strides);
  bwdd_filter_data->create_layouts(convolutionBwdData, dnnResourceFilter,
                                   f_dimension, fdata_sizes, fdata_strides);
  /*
   * Backward by filter layer setup
   */
  status = dnnGroupsConvolutionCreateBackwardFilter<DType>(&convolutionBwdFilter,
                                                           NULL,
                                                           dnnAlgorithmConvolutionDirect,
                                                           g,
                                                           dimension,
                                                           bdata_sizes,
                                                           tdata_sizes,
                                                           fdata_sizes,
                                                           convolutionStrides,
                                                           inputOffset,
                                                           dnnBorderZeros);
  CHECK_EQ(status, 0)
      << "Failed dnnConvolutionCreateBackwardFilter with status "
      << status << "\n";
  bwdf_bottom_data->create_layouts(convolutionBwdFilter, dnnResourceSrc,
                                   dimension, bdata_sizes, bdata_strides);
  bwdf_top_diff->create_layouts(convolutionBwdFilter, dnnResourceDiffDst,
                                dimension, tdata_sizes, tdata_strides);
  bwdf_filter_diff->create_layouts(convolutionBwdFilter, dnnResourceDiffFilter,
                                   f_dimension, fdata_sizes, fdata_strides);
  /*
   * Backward by bias layer setup
   */
  if (!param_.no_bias) {
    status = dnnGroupsConvolutionCreateBackwardBias<DType>(&convolutionBwdBias,
                                                           NULL,
                                                           dnnAlgorithmConvolutionDirect,
                                                           g,
                                                           dimension,
                                                           tdata_sizes);
    CHECK_EQ(status, 0)
        << "Failed dnnConvolutionCreateBackwardBias with status "
        << status << "\n";
    bwdb_top_diff->create_layouts(convolutionBwdBias, dnnResourceDiffDst,
                                  dimension, tdata_sizes, tdata_strides);
    bwdb_bias_diff->create_layouts(convolutionBwdBias, dnnResourceDiffBias, 1,
                                   bias_sizes, bias_strides);
  }
}
public:
// Forward pass: lazily runs LayerSetUp on the first call, converts data,
// weights and (optionally) bias into MKL private layouts, executes the
// precreated forward-convolution primitive, and converts the output back to
// the plain layout when required.
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
Stream<xpu> *s = ctx.get_stream<xpu>();
DType *data_ptr = NULL;
DType *wmat_ptr = NULL;
DType *out_ptr = NULL;
Tensor<xpu, 4, DType> data =
mkl_experimental_direct_get<xpu, 4, DType>(in_data[conv::kData], s);
Tensor<xpu, 4, DType> out =
mkl_experimental_direct_get<xpu, 4, DType>(out_data[conv::kOut], s);
Tensor<xpu, 4, DType> wmat =
mkl_experimental_direct_get<xpu, 4, DType>(in_data[conv::kWeight], s);
// Primitive creation is deferred until the real shapes are known.
if (!init_mkldnn_) {
LayerSetUp(data, out);
init_mkldnn_ = true;
}
// MKL layouts assume dense, contiguous tensors.
CHECK_EQ(data.CheckContiguous(), true);
CHECK_EQ(wmat.CheckContiguous(), true);
CHECK_EQ(out.CheckContiguous(), true);
data_ptr = data.dptr_;
wmat_ptr = wmat.dptr_;
out_ptr = out.dptr_;
int status;
// Resource map indexed by dnnResourceNumber slots, as dnnExecute expects.
void *res_convolutionFwd[dnnResourceNumber];
res_convolutionFwd[dnnResourceSrc] =
fwd_bottom_data->get_converted_prv(data_ptr, false, in_data[conv::kData]);
res_convolutionFwd[dnnResourceFilter] =
fwd_filter_data->get_converted_prv(wmat_ptr, true, in_data[conv::kWeight]);
if (!param_.no_bias) {
Tensor<xpu, 1, DType> bias =
mkl_experimental_direct_get<xpu, 1, DType>(in_data[conv::kBias], s);
res_convolutionFwd[dnnResourceBias] =
fwd_bias_data->get_converted_prv(bias.dptr_, true, in_data[conv::kBias]);
}
res_convolutionFwd[dnnResourceDst] = fwd_top_data->get_output_ptr(out_ptr,
fwd_top_data, out_data[conv::kOut]);
status = dnnExecute<DType>(convolutionFwd, res_convolutionFwd);
CHECK_EQ(status, 0) << "Forward convolution failed with status " << status;
#if MKL_EXPERIMENTAL == 0
// Without the experimental zero-copy path, explicitly convert the private
// MKL output layout back into the user-visible buffer.
if (fwd_top_data->conversion_needed()) {
fwd_top_data->convert_from_prv(out_ptr);
}
#endif
}
// Snapshots `blob_size` elements of `src` into a freshly allocated CPU
// buffer (*pws) so kAddTo can re-add them after MKL overwrites the
// destination (MKL primitives have no accumulate mode).
// Fix vs. original: the byte count is computed in size_t, not int, avoiding
// signed overflow for blobs larger than 2^31 bytes.
void AddToModeAllocAndStoreBuffer(void *src, int blob_size, Storage::Handle *pws) {
  const size_t blob_byte_size = static_cast<size_t>(blob_size) * sizeof(DType);
  *pws = Storage::Get()->Alloc(blob_byte_size, Context::CPU());
  memcpy(pws->dptr, src, blob_byte_size);
}
// Adds the saved kAddTo snapshot back into dst element-wise (parallelized),
// then frees the snapshot buffer and nulls its handle.
void AddToModeAddAndReleaseBuffer(Storage::Handle *pws, void *dst_, int blob_size) {
  DType *out = reinterpret_cast<DType*>(dst_);
  const DType *saved = reinterpret_cast<DType*>(pws->dptr);
#pragma omp parallel for
  for (int idx = 0; idx < blob_size; idx++) {
    out[idx] += saved[idx];
  }
  if (pws->dptr)
    Storage::Get()->Free(*pws);
  pws->dptr = NULL;
}
// Backward pass: computes, on request, the gradients w.r.t. data (req[0]),
// weights (req[1]) and bias (when enabled) with the precreated MKL backward
// primitives. kAddTo is emulated by snapshotting the destination before the
// primitive runs and re-adding the snapshot afterwards, because MKL has no
// native accumulate mode.
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
if (param_.kernel.ndim() > 2) {
LOG(FATAL) << "Volume convolution is not implmented in mshadow";
}
CHECK_EQ(out_grad.size(), 1);
// data + weight (+ bias unless no_bias).
size_t expected = param_.no_bias == 0 ? 3 : 2;
CHECK(in_data.size() == expected && in_grad.size() == expected);
CHECK_EQ(req.size(), expected);
CHECK_EQ(in_data[conv::kWeight].CheckContiguous(), true);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4, DType> data =
mkl_experimental_direct_get<xpu, 4, DType>(in_data[conv::kData], s);
// View the weights as (group, filters-per-group, flattened kernel).
Shape<3> wmat_shape =
Shape3(param_.num_group,
param_.num_filter / param_.num_group,
data.shape_[1] / param_.num_group * param_.kernel[0] * param_.kernel[1]);
Tensor<xpu, 3, DType> wmat =
mkl_experimental_direct_get_with_shape<xpu, 3, DType>(
in_data[conv::kWeight], wmat_shape, s);
Tensor<xpu, 4, DType> grad =
mkl_experimental_direct_get<xpu, 4, DType>(out_grad[conv::kOut], s);
Tensor<xpu, 4, DType> gdata =
mkl_experimental_direct_get<xpu, 4, DType>(in_grad[conv::kData], s);
Tensor<xpu, 3, DType> gwmat =
mkl_experimental_direct_get_with_shape<xpu, 3, DType>(
in_grad[conv::kWeight], wmat_shape, s);
// Backward may run before Forward (e.g. pre-trained graphs), so the
// primitives may still need to be created here.
if (!init_mkldnn_) {
init_mkldnn_ = true;
LayerSetUp(data, grad);
}
int status;
// --- Gradient w.r.t. the input data ---
if (req[0]) {
void *res_convolutionBwdData[dnnResourceNumber];
res_convolutionBwdData[dnnResourceDiffDst] =
bwdd_top_diff->get_converted_prv(grad.dptr_, true, out_grad[conv::kOut]);
res_convolutionBwdData[dnnResourceFilter] =
bwdd_filter_data->get_converted_prv(wmat.dptr_, false, in_data[conv::kWeight]);
Storage::Handle addtoWorkspace;
if (req[0] == kAddTo) {
// wait mkl support addto mode: save the current gradient so it can be
// re-added after the primitive overwrites gdata.
AddToModeAllocAndStoreBuffer(gdata.dptr_, in_grad[conv::kData].Size(), &addtoWorkspace);
}
res_convolutionBwdData[dnnResourceDiffSrc] = bwdd_bottom_diff->get_output_ptr(gdata.dptr_,
bwdd_bottom_diff, in_grad[conv::kData]);
status = dnnExecute<DType>(convolutionBwdData, res_convolutionBwdData);
CHECK_EQ(status, 0) << "Backward Data conv failed with status " << status;
#if MKL_EXPERIMENTAL == 0
if (bwdd_bottom_diff->conversion_needed()) {
bwdd_bottom_diff->convert_from_prv(gdata.dptr_);
}
#endif
if (req[0] == kAddTo) {
if (bwdd_bottom_diff->conversion_needed()) {
bwdd_bottom_diff->convert_from_prv(gdata.dptr_);
}
AddToModeAddAndReleaseBuffer(&addtoWorkspace, gdata.dptr_, in_grad[conv::kData].Size());
}
}
// --- Gradient w.r.t. the filter weights ---
if (req[1]) {
void *res_convolutionBwdFilter[dnnResourceNumber];
res_convolutionBwdFilter[dnnResourceDiffDst] =
bwdf_top_diff->get_converted_prv(grad.dptr_, true, out_grad[conv::kOut]);
res_convolutionBwdFilter[dnnResourceSrc] =
bwdf_bottom_data->get_converted_prv(data.dptr_, false,
in_data[conv::kData]);
Storage::Handle addtoWorkspace;
if (req[1] == kAddTo) {
// wait mkl support addto mode: snapshot the weight gradient first.
AddToModeAllocAndStoreBuffer(gwmat.dptr_, in_grad[conv::kWeight].Size(), &addtoWorkspace);
}
res_convolutionBwdFilter[dnnResourceDiffFilter] = bwdf_filter_diff->get_output_ptr(
gwmat.dptr_, bwdf_filter_diff, in_grad[conv::kWeight]);
status = dnnExecute<DType>(convolutionBwdFilter, res_convolutionBwdFilter);
CHECK_EQ(status, 0) << "Backward Filter conv failed with status " << status;
#if MKL_EXPERIMENTAL == 0
if (bwdf_filter_diff->conversion_needed()) {
bwdf_filter_diff->convert_from_prv(gwmat.dptr_);
}
#endif
if (req[1] == kAddTo) {
if (bwdf_filter_diff->conversion_needed()) {
bwdf_filter_diff->convert_from_prv(gwmat.dptr_);
}
AddToModeAddAndReleaseBuffer(&addtoWorkspace, gwmat.dptr_, in_grad[conv::kWeight].Size());
}
}
// --- Gradient w.r.t. the bias (no kAddTo emulation here) ---
if (!param_.no_bias) {
Tensor<xpu, 1, DType> gbias =
mkl_experimental_direct_get<xpu, 1, DType>(in_grad[conv::kBias], s);
void *res_convolutionBwdBias[dnnResourceNumber];
res_convolutionBwdBias[dnnResourceDiffDst] =
bwdb_top_diff->get_converted_prv(grad.dptr_, true, out_grad[conv::kOut]);
res_convolutionBwdBias[dnnResourceDiffBias] = bwdb_bias_diff->get_output_ptr(gbias.dptr_,
bwdb_bias_diff, in_grad[conv::kBias]);
status = dnnExecute<DType>(convolutionBwdBias, res_convolutionBwdBias);
CHECK_EQ(status, 0) << "Backward Bias failed with status " << status;
#if MKL_EXPERIMENTAL == 0
if (bwdb_bias_diff->conversion_needed()) {
bwdb_bias_diff->convert_from_prv(gbias.dptr_);
}
#endif
}
}
private:
ConvolutionParam param_;
size_t width_,
height_,
width_out_,
height_out_,
kernel_w_,
kernel_h_,
stride_w_,
stride_h_;
int group_,
num_,
num_output_;
size_t channels_;
int pad_w_,
pad_h_;
bool init_mkldnn_;
dnnPrimitive_t convolutionFwd;
dnnPrimitive_t convolutionBwdData;
dnnPrimitive_t convolutionBwdFilter;
dnnPrimitive_t convolutionBwdBias;
/* Fwd step */
std::shared_ptr<MKLData<DType> > fwd_bottom_data, fwd_top_data, fwd_filter_data,
fwd_bias_data;
/* Bwd data step */
std::shared_ptr<MKLData<DType> > bwdd_top_diff, bwdd_bottom_diff;
std::shared_ptr<MKLData<DType> > bwdd_filter_data;
/* Bwd filter step */
std::shared_ptr<MKLData<DType> > bwdf_top_diff, bwdf_filter_diff;
std::shared_ptr<MKLData<DType> > bwdf_bottom_data;
std::shared_ptr<MKLData<DType> > bwdf_filter_diff_iter, bwdf2fwd_filter_diff,
bwdb_bias_diff_iter;
/* Bwd bias step */
std::shared_ptr<MKLData<DType> > bwdb_top_diff, bwdb_bias_diff;
}; // class ConvolutionOp
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MKL_MKL_CONVOLUTION_INL_H_
|
custom_functions.h | //
// Project Name: Kratos
// Last Modified by: $Author: G.Casas (gcasas@cimmne.upc.edu)$
// Date: $Date: 2011-6-13 08:56:42 $
// Revision: $Revision: 1.5 $
//
//
//README::::look to the key word "VERSION" if you want to find all the points where you have to change something so that you can pass from a kdtree to a bin data search structure;
#if !defined(KRATOS_CUSTOM_FUNCTIONS)
#define KRATOS_CUSTOM_FUNCTIONS
// /* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
// System includes
#include <vector>
// Project includes
#include "includes/model_part.h"
#include "utilities/timer.h"
#include "utilities/openmp_utils.h"
#include "processes/find_elements_neighbours_process.h"
#include "processes/find_nodal_neighbours_process.h"
//Database includes
#include "custom_utilities/search/discrete_particle_configure.h"
#include "includes/define.h"
#include "custom_elements/discrete_element.h"
#include "custom_elements/swimming_particle.h"
#include "custom_utilities/AuxiliaryFunctions.h"
#include "custom_elements/spheric_particle.h"
#include "swimming_DEM_application.h"
#include "utilities/geometry_utilities.h"
namespace Kratos
{
template <std::size_t TDim>
class CustomFunctionsCalculator
{
public:
typedef ModelPart::ElementsContainerType::iterator ElementIterator;
typedef ModelPart::NodesContainerType::iterator NodeIterator;
typedef ModelPart::NodesContainerType NodesArrayType;
KRATOS_CLASS_POINTER_DEFINITION(CustomFunctionsCalculator);
/// Default constructor: all bookkeeping flags start false.
CustomFunctionsCalculator(): mPressuresFilled(false), mFirstGradientRecovery(true), mFirstLaplacianRecovery(true), mSomeCloudsDontWork(false), mCalculatingTheGradient(false), mCalculatingTheLaplacian(false), mFirstTimeAppending(true){}
/// Destructor.
virtual ~CustomFunctionsCalculator(){}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Nodal recovery of the pressure gradient: the (constant) elemental gradient
// is distributed to the element's nodes weighted by nodal volume, then each
// nodal accumulator is divided by NODAL_AREA.
void CalculatePressureGradient(ModelPart& r_model_part)
{
    // Reset the nodal accumulators.
    for (NodeIterator node_it = r_model_part.NodesBegin(); node_it != r_model_part.NodesEnd(); ++node_it){
        noalias(node_it->FastGetSolutionStepValue(PRESSURE_GRADIENT)) = ZeroVector(3);
    }
    array_1d <double, 3> nodal_contribution = ZeroVector(3); // always 3D, even for TDim == 2
    array_1d <double, TDim + 1 > p_elem;                     // nodal pressures of one element
    array_1d <double, TDim + 1 > shape_funcs;                // shape functions vector
    BoundedMatrix<double, TDim + 1, TDim> grad_N;            // shape function derivatives
    for (ModelPart::ElementIterator elem_it = r_model_part.ElementsBegin(); elem_it != r_model_part.ElementsEnd(); ++elem_it){
        Geometry<Node<3> >& r_geom = elem_it->GetGeometry();
        double volume;
        GeometryUtils::CalculateGeometryData(r_geom, grad_N, shape_funcs, volume);
        // Gather the elemental pressures.
        for (unsigned int i_node = 0; i_node < TDim + 1; ++i_node){
            p_elem[i_node] = r_geom[i_node].FastGetSolutionStepValue(PRESSURE);
        }
        // Constant elemental gradient (dimension TDim).
        array_1d <double, TDim> elem_grad = prod(trans(grad_N), p_elem);
        for (unsigned int d = 0; d < TDim; ++d){
            nodal_contribution[d] = elem_grad[d];
        }
        const double nodal_area = volume / static_cast<double>(TDim + 1);
        nodal_contribution *= nodal_area;
        for (unsigned int i_node = 0; i_node < TDim + 1; ++i_node){
            r_geom[i_node].FastGetSolutionStepValue(PRESSURE_GRADIENT) += nodal_contribution;
        }
    }
    // Normalize by the accumulated nodal areas.
    for (NodeIterator node_it = r_model_part.NodesBegin(); node_it != r_model_part.NodesEnd(); ++node_it){
        node_it->FastGetSolutionStepValue(PRESSURE_GRADIENT) /= node_it->FastGetSolutionStepValue(NODAL_AREA);
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// This function assesses the stationarity based on the pressure field variation.
// Its tolerance applies to the non-dimensional pressure variation between consecutive
// measurements.
// Returns true when the (adimensionalized) maximum nodal pressure change
// rate since the previous call drops below `tol`. The first call only
// records a baseline and returns false.
bool AssessStationarity(ModelPart& r_model_part, const double& tol)
{
if (!mPressuresFilled){
PerformFirstStepComputations(r_model_part);
return(false);
}
else {
double max_pressure_change_rate = 0.0; // measure of stationarity
double mean_celerity = 0.0; // used to adimensionalize the time step
// filling up mPressures and calculating the mean velocities and the maximum nodal pressure change
unsigned int i = 0;
for (NodeIterator inode = r_model_part.NodesBegin(); inode != r_model_part.NodesEnd(); ++inode){
const array_1d<double, 3>& velocity = inode->FastGetSolutionStepValue(VELOCITY);
mean_celerity += SWIMMING_MODULUS_3(velocity);
const double new_pressure = inode->FastGetSolutionStepValue(PRESSURE);
double& old_pressure = mPressures[i];
const double delta_p = std::abs(new_pressure - old_pressure);
max_pressure_change_rate = std::max(delta_p, max_pressure_change_rate);
old_pressure = new_pressure;
++i;
}
mean_celerity /= i;
const double delta_t = r_model_part.GetProcessInfo()[TIME] - mLastMeasurementTime;
if (delta_t > 0.0){
max_pressure_change_rate /= delta_t;
// calculating coefficients for adimensionalization of the pressure change rate
const double characteristic_length = std::pow(mTotalDomainVolume, 1.0 / 3); // characteristic length of the model. Should be improved: a hydraulic radius or such
const double reciprocal_of_characteristic_time = mean_celerity / characteristic_length;
const double pressure_spatial_variation = GetRangeWithinVector(mPressures);
// NOTE(review): mLastPressureVariation is overwritten BEFORE it is used
// in the average below, so characteristic_pressure_variation always
// equals pressure_spatial_variation and the 0.5*(new+old) smoothing is
// a no-op. Reordering would read mLastPressureVariation before any
// assignment (it is not set in the constructor) — confirm intent and
// initialization before fixing.
mLastPressureVariation = pressure_spatial_variation;
const double characteristic_pressure_variation = 0.5 * (pressure_spatial_variation + mLastPressureVariation);
if (std::abs(characteristic_pressure_variation) == 0.0 || std::abs(reciprocal_of_characteristic_time) == 0.0){ // unlikely
std::cout << "Uniform problem: stationarity check being performed with dimensional values...! " << "\n";
if (max_pressure_change_rate <= tol){ // go with the absolute value
return true;
}
}
max_pressure_change_rate /= reciprocal_of_characteristic_time * characteristic_pressure_variation ;
}
else {
KRATOS_ERROR << "Trying to calculate pressure variations between two coincident time steps! (null time variation since last recorded time)" << std::endl;
}
std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n";
std::cout << "The stationarity condition tolerance is " << "\n";
KRATOS_INFO("SwimmingDEM") << tol << std::endl;
std::cout << "The stationarity residual is now " << "\n";
KRATOS_INFO("SwimmingDEM") << max_pressure_change_rate << std::endl;
std::cout << "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" << "\n";
return max_pressure_change_rate <= tol;
}
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Total volume of the local mesh: elemental volumes summed with an OpenMP
// reduction over the element partitions.
double CalculateDomainVolume(ModelPart& r_fluid_model_part)
{
    OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
    double total_volume = 0.0;
    #pragma omp parallel for reduction(+ : total_volume)
    for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){
        for (ElementIterator it_elem = GetElementPartitionBegin(r_fluid_model_part, k); it_elem != GetElementPartitionEnd(r_fluid_model_part, k); ++it_elem){
            total_volume += CalculateElementalVolume(it_elem->GetGeometry());
        }
    }
    return total_volume;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
// Sums HYDRODYNAMIC_FORCE over all particle elements (first node of each
// element carries the value); per-thread partial sums avoid races.
// this function assumes linear elements are used
void CalculateTotalHydrodynamicForceOnParticles(ModelPart& r_dem_model_part, array_1d <double, 3>& force)
{
    OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_dem_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
    std::vector<array_1d <double, 3> > partial_sums;
    partial_sums.resize(ParallelUtilities::GetNumThreads());
    for (unsigned int k = 0; k < partial_sums.size(); ++k){
        partial_sums[k] = ZeroVector(3);
    }
    #pragma omp parallel for
    for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){
        for (ElementIterator it_elem = GetElementPartitionBegin(r_dem_model_part, k); it_elem != GetElementPartitionEnd(r_dem_model_part, k); ++it_elem){
            Geometry< Node<3> >& r_geom = it_elem->GetGeometry();
            array_1d <double, 3> particle_force;
            if (r_geom[0].SolutionStepsDataHas(HYDRODYNAMIC_FORCE)){
                particle_force = r_geom[0].FastGetSolutionStepValue(HYDRODYNAMIC_FORCE);
            }
            else {
                particle_force = ZeroVector(3);
            }
            partial_sums[k] += particle_force;
        }
    }
    // Serial reduction of the per-thread partials.
    force = partial_sums[0];
    for (unsigned int k = 1; k < partial_sums.size(); ++k){
        force += partial_sums[k];
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
// Integrates HYDRODYNAMIC_REACTION and MEAN_HYDRODYNAMIC_REACTION over the
// fluid mesh (per-thread partial sums, reduced serially at the end).
// Fix vs. original: the mean-force reduction was seeded with
// added_force_vect[0] (the instantaneous array) instead of
// added_mean_force_vect[0], so mean_force mixed the two sums.
// this function assumes linear elements are used
void CalculateTotalHydrodynamicForceOnFluid(ModelPart& r_fluid_model_part, array_1d <double, 3>& instantaneous_force, array_1d <double, 3>& mean_force)
{
    OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
    std::vector<array_1d <double, 3> > added_force_vect;
    added_force_vect.resize(ParallelUtilities::GetNumThreads());
    std::vector<array_1d <double, 3> > added_mean_force_vect;
    added_mean_force_vect.resize(ParallelUtilities::GetNumThreads());
    for (unsigned int k = 0; k < added_force_vect.size(); ++k){
        added_force_vect[k] = ZeroVector(3);
        added_mean_force_vect[k] = ZeroVector(3);
    }
    #pragma omp parallel for
    for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){
        for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
            Geometry< Node<3> >& geom = it->GetGeometry();
            double element_volume;
            array_1d <double, 3> element_force;
            array_1d <double, 3> element_mean_force;
            if (geom[0].SolutionStepsDataHas(HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, HYDRODYNAMIC_REACTION, element_volume);
            }
            else {
                element_force = ZeroVector(3);
            }
            if (geom[0].SolutionStepsDataHas(MEAN_HYDRODYNAMIC_REACTION) && geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_mean_force = CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(geom, MEAN_HYDRODYNAMIC_REACTION, element_volume);
            }
            else {
                element_mean_force = ZeroVector(3);
            }
            added_force_vect[k] += element_force;
            added_mean_force_vect[k] += element_mean_force;
        }
    }
    // Serial reduction; each output starts from its own thread-0 partial.
    instantaneous_force = added_force_vect[0];
    mean_force = added_mean_force_vect[0];
    for (unsigned int k = 1; k < added_force_vect.size(); ++k){
        instantaneous_force += added_force_vect[k];
        mean_force += added_mean_force_vect[k];
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// this function assumes linear elements are used
// Computes the total fluid volume over the local mesh by integrating the nodal
// FLUID_FRACTION field element by element (linear elements assumed, per the
// note above). Elements whose nodes do not carry FLUID_FRACTION contribute
// their raw geometric volume instead.
double CalculateGlobalFluidVolume(ModelPart& r_fluid_model_part)
{
    // Build the per-thread [begin, end) ranges over the local-mesh elements
    // (consumed by GetElementPartitionBegin/End below).
    OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_fluid_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

    double added_fluid_volume = 0.0;

    #pragma omp parallel for reduction(+ : added_fluid_volume)
    for (int k = 0; k < ParallelUtilities::GetNumThreads(); ++k){

        for (ElementIterator it = GetElementPartitionBegin(r_fluid_model_part, k); it != GetElementPartitionEnd(r_fluid_model_part, k); ++it){
            Geometry< Node<3> >& geom = it->GetGeometry();
            double element_volume;
            double element_fluid_volume;

            // Only node 0 is checked — presumably all nodes of the model part
            // share the same solution-step variable list (TODO confirm).
            if (geom[0].SolutionStepsDataHas(FLUID_FRACTION)){
                element_fluid_volume = CalculateScalarIntegralOfLinearInterpolation(geom, FLUID_FRACTION, element_volume);
            }

            else {
                element_fluid_volume = CalculateElementalVolume(geom);
            }

            added_fluid_volume += element_fluid_volume;
        }
    }

    return added_fluid_volume;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Computes the determinant of a uBLAS matrix expression via LU factorization
// with partial pivoting: det = (+/-1) * prod(diag(U)), where the sign is the
// parity of the row permutation. Returns 0.0 when the factorization detects a
// singular matrix.
template<class matrix_T>
double determinant(boost::numeric::ublas::matrix_expression<matrix_T> const& mat_r)
{
    double det = 1.0;

    matrix_T mLu(mat_r() );  // working copy — lu_factorize overwrites its input
    boost::numeric::ublas::permutation_matrix<std::size_t> pivots(mat_r().size1() );

    // lu_factorize returns non-zero when a zero pivot is encountered
    int is_singular = lu_factorize(mLu, pivots);

    if (!is_singular)
    {
        for (std::size_t i=0; i < pivots.size(); ++i)
        {
            if (pivots(i) != i)
                det *= -1.0;  // each recorded row swap flips the sign

            det *= mLu(i,i);  // diagonal of U
        }
    }

    else
        det = 0.0;

    return det;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the inverse of the square matrix m.
// Sizes 1..3 use the explicit adjugate/cofactor formulas; larger matrices are
// inverted recursively by 2x2 blockwise inversion (Chop/Unchop) using the
// Schur complement term = D - C*A^-1*B.
// Preconditions (asserted): m is square and, for the explicit cases,
// non-singular. Part of the block of functions adapted from Richel
// Bilderbeek's uBLAS examples (see the note above CalcDeterminant).
const DenseMatrix<double> Inverse(
    const DenseMatrix<double>& m)
{
    assert(m.size1() == m.size2() && "Can only calculate the inverse of square matrices");

    switch(m.size1())
    {
        case 1:
        {
            assert(m.size1() == 1 && m.size2() == 1 && "Only for 1x1 matrices");
            const double determinant = CalcDeterminant(m);
            assert(determinant != 0.0);
            assert(m(0,0) != 0.0 && "Cannot take the inverse of matrix [0]");
            DenseMatrix<double> n(1,1);
            n(0,0) = 1.0 / determinant;
            return n;
        }
        case 2:
        {
            // 2x2 closed form: inv = (1/det) * [d -b; -c a]
            assert(m.size1() == 2 && m.size2() == 2 && "Only for 2x2 matrices");
            const double determinant = CalcDeterminant(m);
            assert(determinant != 0.0);
            const double a = m(0,0);
            const double b = m(0,1);
            const double c = m(1,0);
            const double d = m(1,1);
            DenseMatrix<double> n(2,2);
            n(0,0) = d / determinant;
            n(0,1) = -b / determinant;
            n(1,0) = -c / determinant;
            n(1,1) = a / determinant;
            return n;
        }
        case 3:
        {
            // 3x3 closed form via cofactors. Note the transposed assignment
            // below (new_b goes to n(1,0), new_d to n(0,1), ...): the new_*
            // values are row cofactors, written column-wise to produce the
            // adjugate transpose.
            assert(m.size1() == 3 && m.size2() == 3 && "Only for 3x3 matrices");
            const double determinant = CalcDeterminant(m);
            assert(determinant != 0.0);
            const double a = m(0,0);
            const double b = m(0,1);
            const double c = m(0,2);
            const double d = m(1,0);
            const double e = m(1,1);
            const double f = m(1,2);
            const double g = m(2,0);
            const double h = m(2,1);
            const double k = m(2,2);
            DenseMatrix<double> n(3,3);
            const double new_a = ((e*k)-(f*h)) / determinant;
            const double new_b = -((d*k)-(f*g)) / determinant;
            const double new_c = ((d*h)-(e*g)) / determinant;
            const double new_d = -((b*k)-(c*h)) / determinant;
            const double new_e = ((a*k)-(c*g)) / determinant;
            const double new_f = -((a*h)-(b*g)) / determinant;
            const double new_g = ((b*f)-(c*e)) / determinant;
            const double new_h = -((a*f)-(c*d)) / determinant;
            const double new_k = ((a*e)-(b*d)) / determinant;
            n(0,0) = new_a;
            n(1,0) = new_b;
            n(2,0) = new_c;
            n(0,1) = new_d;
            n(1,1) = new_e;
            n(2,1) = new_f;
            n(0,2) = new_g;
            n(1,2) = new_h;
            n(2,2) = new_k;
            return n;
        }
        default:
        {
            //Use blockwise inversion
            //Matrix::Chop returns a std::vector
            //[ A at [0] B at [1] ]
            //[ C at [2] D at [3] ]
            const std::vector<DenseMatrix<double> > v = Chop(m);
            const DenseMatrix<double>& a = v[0];
            assert(a.size1() == a.size2());
            const DenseMatrix<double> a_inv = Inverse(a);  // recursion halves the size
            const DenseMatrix<double>& b = v[1];
            const DenseMatrix<double>& c = v[2];
            const DenseMatrix<double>& d = v[3];
            // Schur complement of A: term = D - C*A^-1*B. NOTE(review): both A
            // and term must be invertible for this to work; that is only
            // checked by the asserts inside the recursive calls.
            const DenseMatrix<double> term
                = d
                - prod(
                    DenseMatrix<double>(prod(c,a_inv)),
                    b
                );
            const DenseMatrix<double> term_inv = Inverse(term);
            // Standard blockwise-inversion formulas:
            // new_a = A^-1 + A^-1*B*term^-1*C*A^-1
            const DenseMatrix<double> new_a
                = a_inv
                + DenseMatrix<double>(prod(
                    DenseMatrix<double>(prod(
                        DenseMatrix<double>(prod(
                            DenseMatrix<double>(prod(
                                a_inv,
                                b)),
                            term_inv)),
                        c)),
                    a_inv));
            // new_b = -A^-1*B*term^-1
            const DenseMatrix<double> new_b
                =
                - DenseMatrix<double>(prod(
                    DenseMatrix<double>(prod(
                        a_inv,
                        b)),
                    term_inv));
            // new_c = -term^-1*C*A^-1
            const DenseMatrix<double> new_c
                =
                - DenseMatrix<double>(prod(
                    DenseMatrix<double>(prod(
                        term_inv,
                        c)),
                    a_inv));
            const DenseMatrix<double> new_d = term_inv;
            std::vector<DenseMatrix<double> > w;
            w.push_back(new_a);
            w.push_back(new_b);
            w.push_back(new_c);
            w.push_back(new_d);
            const DenseMatrix<double> result = Unchop(w);  // reassemble the 4 blocks
            return result;
        }
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Copies, node by node, the scalar stored in origin_variable into
// destination_variable. Runs in parallel over all nodes of the model part.
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<double>& origin_variable, const Variable<double>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        node_it->FastGetSolutionStepValue(destination_variable) = node_it->FastGetSolutionStepValue(origin_variable);
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Copies, node by node, the 3-vector stored in origin_variable into
// destination_variable. Runs in parallel over all nodes of the model part.
void CopyValuesFromFirstToSecond(ModelPart& r_model_part, const Variable<array_1d<double, 3>>& origin_variable, const Variable<array_1d<double, 3>>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        noalias(node_it->FastGetSolutionStepValue(destination_variable)) = node_it->FastGetSolutionStepValue(origin_variable);
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Assigns the given constant scalar to destination_variable on every node.
// NOTE(review): "Notes" in the name looks like a typo for "Nodes"; the name is
// kept unchanged for compatibility with existing callers.
void SetValueOfAllNotes(ModelPart& r_model_part, const double& value, const Variable<double>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        node_it->FastGetSolutionStepValue(destination_variable) = value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Assigns the given constant 3-vector to destination_variable on every node.
// NOTE(review): "Notes" in the name looks like a typo for "Nodes"; the name is
// kept unchanged for compatibility with existing callers.
void SetValueOfAllNotes(ModelPart& r_model_part, const array_1d<double, 3>& value, const Variable<array_1d<double, 3>>& destination_variable)
{
    const int n_nodes = (int)r_model_part.Nodes().size();

    #pragma omp parallel for
    for (int i = 0; i < n_nodes; ++i){
        ModelPart::NodesContainerType::iterator node_it = r_model_part.NodesBegin() + i;
        noalias(node_it->FastGetSolutionStepValue(destination_variable)) = value;
    }
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
private:

bool mPressuresFilled;              // set once PerformFirstStepComputations has snapshotted PRESSURE
bool mFirstGradientRecovery;        // NOTE(review): used by recovery code outside this chunk — purpose not visible here
bool mFirstLaplacianRecovery;       // NOTE(review): see above
bool mSomeCloudsDontWork;           // NOTE(review): see above
bool mCalculatingTheGradient;       // NOTE(review): see above
bool mCalculatingTheLaplacian;      // NOTE(review): see above
bool mFirstTimeAppending;           // NOTE(review): see above
double mLastMeasurementTime;        // ProcessInfo[TIME] at the last pressure snapshot
double mLastPressureVariation;      // max - min of the last PRESSURE snapshot
double mTotalDomainVolume;          // cached result of CalculateDomainVolume
std::vector<double> mPressures;     // per-node PRESSURE snapshot (filled in PerformFirstStepComputations)
std::vector<DenseVector<double> > mFirstRowsOfB;  // NOTE(review): consumer not visible in this chunk
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the (unsigned) area of the triangle (x0,y0)-(x1,y1)-(x2,y2):
// half the absolute value of the cross product of the two edges leaving
// vertex 0.
inline double CalculateArea(const double x0, const double y0,
                            const double x1, const double y1,
                            const double x2, const double y2)
{
    const double ux = x1 - x0;
    const double uy = y1 - y0;
    const double vx = x2 - x0;
    const double vy = y2 - y0;

    return 0.5 * std::abs(ux * vy - vx * uy);
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the signed volume of the tetrahedron with vertices p0..p3:
// det([p1-p0, p2-p0, p3-p0]) / 6. The sign encodes the vertex orientation.
inline double CalculateVol(const double x0, const double y0, const double z0,
                           const double x1, const double y1, const double z1,
                           const double x2, const double y2, const double z2,
                           const double x3, const double y3, const double z3)
{
    // Edge vectors from vertex 0
    const double ax = x1 - x0, ay = y1 - y0, az = z1 - z0;
    const double bx = x2 - x0, by = y2 - y0, bz = z2 - z0;
    const double cx = x3 - x0, cy = y3 - y0, cz = z3 - z0;

    // 3x3 determinant, expanded term by term (same grouping as before so the
    // floating-point result is bit-identical)
    const double detJ = ax * by * cz - ax * cy * bz +
                        ay * bz * cx - ay * bx * cz +
                        az * bx * cy - az * by * cx;

    return detJ * 0.1666666666666666666666667;  // detJ / 6
}
//***************************************************************************************************************
//***************************************************************************************************************
// Returns the measure of a linear element: unsigned triangle area when
// TDim == 2, signed tetrahedron volume otherwise. Aborts with KRATOS_ERROR if
// the element is degenerate, i.e. |measure| is negligible compared with h^3,
// where h is the characteristic length from CalculateDiameter.
// NOTE(review): in 2D the test still divides by h^3 (units 1/length); h^2
// might have been intended — confirm before relying on the threshold.
double CalculateElementalVolume(const Geometry<Node <3> >& geom)
{
    double measure;
    double h;

    if (TDim == 2){
        const double x0 = geom[0].X();
        const double y0 = geom[0].Y();
        const double x1 = geom[1].X();
        const double y1 = geom[1].Y();
        const double x2 = geom[2].X();
        const double y2 = geom[2].Y();

        measure = CalculateArea(x0, y0, x1, y1, x2, y2);
        h = CalculateDiameter(x0, y0, x1, y1, x2, y2);
    }

    else {
        const double x0 = geom[0].X();
        const double y0 = geom[0].Y();
        const double z0 = geom[0].Z();
        const double x1 = geom[1].X();
        const double y1 = geom[1].Y();
        const double z1 = geom[1].Z();
        const double x2 = geom[2].X();
        const double y2 = geom[2].Y();
        const double z2 = geom[2].Z();
        const double x3 = geom[3].X();
        const double y3 = geom[3].Y();
        const double z3 = geom[3].Z();

        measure = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
        h = CalculateDiameter(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    }

    if (std::abs(measure)/std::pow(h, 3) < std::numeric_limits<double>::epsilon()){
        KRATOS_ERROR << "Element with zero area found with the current geometry "<< geom << std::endl;
    }

    return measure;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the length of the longest edge of the triangle
// (x0,y0)-(x1,y1)-(x2,y2).
//
// Bug fix: the original computed the edge 1-2 as (x1 - x0, y1 - y0) — a
// duplicate of edge 1-0 — so edge 1-2 was never measured and the result was
// wrong whenever 1-2 was the longest edge. It now uses (x1 - x2, y1 - y2).
double CalculateDiameter(const double x0, const double y0,
                         const double x1, const double y1,
                         const double x2, const double y2)
{
    const double x10 = x1 - x0;
    const double y10 = y1 - y0;
    const double x12 = x1 - x2;
    const double y12 = y1 - y2;
    const double x20 = x2 - x0;
    const double y20 = y2 - y0;

    const double dist_10 = std::sqrt(x10 * x10 + y10 * y10);
    const double dist_12 = std::sqrt(x12 * x12 + y12 * y12);
    const double dist_20 = std::sqrt(x20 * x20 + y20 * y20);

    return std::max({dist_10, dist_12, dist_20});
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the length of the longest edge of the tetrahedron p0-p1-p2-p3
// (all six edges are measured).
//
// Bug fix: the original computed the edge 1-2 as (x1 - x0, ...) — a duplicate
// of edge 1-0 — so edge 1-2 was never measured and the result was wrong
// whenever 1-2 was the longest edge. It now uses (x1 - x2, y1 - y2, z1 - z2).
double CalculateDiameter(const double x0, const double y0, const double z0,
                         const double x1, const double y1, const double z1,
                         const double x2, const double y2, const double z2,
                         const double x3, const double y3, const double z3)
{
    const double x10 = x1 - x0, y10 = y1 - y0, z10 = z1 - z0;
    const double x12 = x1 - x2, y12 = y1 - y2, z12 = z1 - z2;
    const double x13 = x1 - x3, y13 = y1 - y3, z13 = z1 - z3;
    const double x20 = x2 - x0, y20 = y2 - y0, z20 = z2 - z0;
    const double x23 = x2 - x3, y23 = y2 - y3, z23 = z2 - z3;
    const double x30 = x3 - x0, y30 = y3 - y0, z30 = z3 - z0;

    const double dist_10 = std::sqrt(x10 * x10 + y10 * y10 + z10 * z10);
    const double dist_12 = std::sqrt(x12 * x12 + y12 * y12 + z12 * z12);
    const double dist_13 = std::sqrt(x13 * x13 + y13 * y13 + z13 * z13);
    const double dist_20 = std::sqrt(x20 * x20 + y20 * y20 + z20 * z20);
    const double dist_23 = std::sqrt(x23 * x23 + y23 * y23 + z23 * z23);
    const double dist_30 = std::sqrt(x30 * x30 + y30 * y30 + z30 * z30);

    return std::max({dist_10, dist_12, dist_13, dist_20, dist_23, dist_30});
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Integrates the nodal scalar field r_var over a linear tetrahedron and
// returns that integral; the element volume is returned through 'vol'.
// N[i] holds the volume of the sub-tetrahedron formed by the centroid and the
// face opposite node i (NOT a normalized shape function), so
// sum_i N[i]*value_i is the volume-weighted (integral) value.
// NOTE(review): despite the local name value_at_gauss_point, the return value
// is the integral, not a point value — confirm against callers. The vertex
// orderings in the four CalculateVol calls fix the sign of each sub-volume;
// presumably chosen so all are positive for a positively oriented element.
double CalculateScalarIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<double>& r_var, double& vol)
{
    array_1d<double, 4> N;

    // Vertex coordinates
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    // Centroid of the tetrahedron
    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);

    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    KRATOS_ERROR_IF(std::abs(vol) == 0.0) << "Element with zero area found. Its geometry is given by "<< geom << std::endl;

    // Sub-volumes: centroid + the face opposite each node
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);

    double value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);

    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var, 0);
    }

    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Vector counterpart of CalculateScalarIntegralOfLinearInterpolation:
// integrates the nodal 3-vector field r_var over a linear tetrahedron and
// returns the integral; the element volume is returned through 'vol'.
// N[i] is the volume of the centroid sub-tetrahedron opposite node i (not a
// normalized shape function) — see the scalar version for details.
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolation(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    array_1d<double, 4> N;

    // Vertex coordinates
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    // Centroid of the tetrahedron
    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);

    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    KRATOS_ERROR_IF(std::abs(vol) == 0.0) << "Element with zero area found. Its geometry is given by " << geom << std::endl;

    // Sub-volumes: centroid + the face opposite each node
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);

    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var);

    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var);
    }

    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Like CalculateVectorIntegralOfLinearInterpolation, but each nodal value is
// additionally weighted by the node's DENSITY * FLUID_FRACTION, i.e. the
// integrand is r_var times the fluid mass per unit total volume. The element
// volume is returned through 'vol'.
// NOTE(review): the name says "per unit fluid mass", but the code multiplies
// by (not divides by) the fluid mass density — verify the intended semantics
// against the callers (used for HYDRODYNAMIC_REACTION sums above).
array_1d <double, 3> CalculateVectorIntegralOfLinearInterpolationPerUnitFluidMass(const Geometry<Node < 3 > >& geom, const Variable<array_1d <double, 3> >& r_var, double& vol)
{
    array_1d<double, 4> N;

    // Vertex coordinates
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    // Centroid of the tetrahedron
    double xc = 0.25 * (x0 + x1 + x2 + x3);
    double yc = 0.25 * (y0 + y1 + y2 + y3);
    double zc = 0.25 * (z0 + z1 + z2 + z3);

    vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);

    KRATOS_ERROR_IF(std::abs(vol) == 0.0) << "Element with zero area found. Its geometry is given by " << geom << std::endl;

    // Sub-volumes: centroid + the face opposite each node (see scalar version)
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc);
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc);
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc);
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc);

    array_1d <double, 3> value_at_gauss_point = N[0] * geom[0].FastGetSolutionStepValue(r_var) * geom[0].FastGetSolutionStepValue(DENSITY) * geom[0].FastGetSolutionStepValue(FLUID_FRACTION);

    for (unsigned int i = 1; i != 4; ++i){
        value_at_gauss_point += N[i] * geom[i].FastGetSolutionStepValue(r_var) * geom[i].FastGetSolutionStepValue(DENSITY) * geom[i].FastGetSolutionStepValue(FLUID_FRACTION);
    }

    return value_at_gauss_point;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// First-call initialization for the pressure-monitoring utilities: caches the
// domain volume and the current time, snapshots all nodal PRESSURE values into
// mPressures, and records their spread as the initial pressure variation.
void PerformFirstStepComputations(ModelPart& r_model_part)
{
    mTotalDomainVolume = CalculateDomainVolume(r_model_part);
    mPressures.resize(r_model_part.Nodes().size());
    mLastMeasurementTime = r_model_part.GetProcessInfo()[TIME];

    unsigned int counter = 0;

    for (NodeIterator node_it = r_model_part.NodesBegin(); node_it != r_model_part.NodesEnd(); ++node_it) {
        mPressures[counter] = node_it->FastGetSolutionStepValue(PRESSURE);
        ++counter;
    }

    mPressuresFilled = true;
    mLastPressureVariation = GetRangeWithinVector(mPressures);
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Strict weak ordering on (id, distance) pairs: primarily by increasing
// distance, breaking exact distance ties by increasing id.
struct IsCloser{
    bool operator()(std::pair<unsigned int, double> const& first_pair, std::pair<unsigned int, double> const& second_pair)
    {
        if (first_pair.second != second_pair.second){
            return first_pair.second < second_pair.second;
        }

        return first_pair.first < second_pair.first;
    }
};
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns n! (with 0! == 1). The product is accumulated in 32-bit unsigned
// arithmetic and returned as int, so results overflow for n > 12 — same as
// the previous implementation (unsigned wraparound is well defined, and the
// ascending product is bitwise identical to the descending one modulo 2^32).
inline int Factorial(const unsigned int n){
    unsigned int result = 1;

    for (unsigned int i = 2; i <= n; ++i){
        result *= i;
    }

    return result;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the length of the longest element edge in the model part
// (0.0 for a model part with no elements, as before).
//
// Bug fix: the outer loop previously stopped at k < n_nodes - 1, so the pair
// formed by the last two nodes (e.g. edge 2-3 of a tetrahedron) was never
// examined. The bound is now k < n_nodes, which visits every node pair
// (k - 1, i) exactly once.
double CalculateTheMaximumEdgeLength(ModelPart& r_model_part)
{
    double max_distance_2 = 0.0;  // squared length of the longest edge so far

    for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
        Geometry<Node<3> >& geom = ielem->GetGeometry();
        unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);  // linear simplex

        // All node pairs (k - 1, i), i >= k — squared distances avoid a sqrt
        // per pair.
        for (unsigned int k = 1; k < n_nodes; ++k){
            for (unsigned int i = k; i < n_nodes; ++i){
                array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
                double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);
                max_distance_2 = max_distance_2 > distance_2 ? max_distance_2 : distance_2;
            }
        }
    }

    return(std::sqrt(max_distance_2));
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// Returns the length of the shortest element edge in the model part
// (0.0 for a model part with no elements, as before).
// NOTE(review): "Minumum" is a typo for "Minimum"; the name is kept for
// compatibility with existing callers.
//
// Bug fixes:
// 1. The 'first_node' flag was never cleared, so the running minimum was
//    reseeded on EVERY element and the function effectively returned the
//    shortest edge of the last element only. The seed now happens once.
// 2. As in CalculateTheMaximumEdgeLength, the outer loop stopped at
//    k < n_nodes - 1 and skipped the pair of the last two nodes; the bound is
//    now k < n_nodes.
double CalculateTheMinumumEdgeLength(ModelPart& r_model_part)
{
    double min_distance_2 = 0.0;  // squared length of the shortest edge so far
    bool min_is_seeded = false;

    for (ModelPart::ElementIterator ielem = r_model_part.ElementsBegin(); ielem != r_model_part.ElementsEnd(); ++ielem){
        Geometry<Node<3> >& geom = ielem->GetGeometry();
        unsigned int n_nodes = static_cast<unsigned int>(TDim + 1);  // linear simplex

        // All node pairs (k - 1, i), i >= k
        for (unsigned int k = 1; k < n_nodes; ++k){
            for (unsigned int i = k; i < n_nodes; ++i){
                array_1d <double, 3> delta_i = geom[k - 1] - geom[i];
                double distance_2 = DEM_INNER_PRODUCT_3(delta_i, delta_i);

                if (!min_is_seeded || distance_2 < min_distance_2){
                    min_distance_2 = distance_2;
                    min_is_seeded = true;
                }
            }
        }
    }

    return(std::sqrt(min_distance_2));
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
// The following block of functions is used to calculate explicit matrix inverses and was taken from
// Richel BilderBeek's website (http://www.richelbilderbeek.nl/CppUblasMatrixExample6.htm), and it is
// transcribed here with a very minor modification
// Hard-coded determinants for 1x1, 2x2 and 3x3 matrices (cofactor expansion
// along the first row for the 3x3 case). Larger sizes are not supported:
// they trip an assert in debug builds and throw in release builds.
// Adapted from Richel Bilderbeek's uBLAS examples (see note above).
double CalcDeterminant(const DenseMatrix<double>& m)
{
    assert(m.size1() == m.size2() && "Can only calculate the determinant of square matrices");

    const std::size_t size = m.size1();

    if (size == 1){
        return m(0,0);
    }

    if (size == 2){
        return (m(0,0) * m(1,1)) - (m(0,1) * m(1,0));
    }

    if (size == 3){
        const double a = m(0,0);
        const double b = m(0,1);
        const double c = m(0,2);
        const double d = m(1,0);
        const double e = m(1,1);
        const double f = m(1,2);
        const double g = m(2,0);
        const double h = m(2,1);
        const double k = m(2,2);

        return (a * ((e*k) - (f*h)))
             - (b * ((k*d) - (f*g)))
             + (c * ((d*h) - (e*g)));
    }

    assert(!"Should not get here: unsupported matrix size");
    throw std::runtime_error("Unsupported matrix size");
}
///Chop splits m into its four quadrant sub-matrices, returned as
//[ A at [0] B at [1] ]
//[ C at [2] D at [3] ]
//(rows/columns are halved with integer division, so for odd sizes the
//bottom/right blocks are one row/column larger).
const std::vector<DenseMatrix<double> > Chop(
    const DenseMatrix<double>& m)
{
    using boost::numeric::ublas::range;
    using boost::numeric::ublas::matrix_range;

    const int half_rows = m.size1() / 2;
    const int half_cols = m.size2() / 2;

    const range top(0, half_rows);
    const range bottom(half_rows, m.size1());
    const range left(0, half_cols);
    const range right(half_cols, m.size2());

    std::vector<matrix<double> > quadrants;
    quadrants.reserve(4);
    quadrants.push_back(matrix<double>(matrix_range<const matrix<double> >(m, top, left)));
    quadrants.push_back(matrix<double>(matrix_range<const matrix<double> >(m, top, right)));
    quadrants.push_back(matrix<double>(matrix_range<const matrix<double> >(m, bottom, left)));
    quadrants.push_back(matrix<double>(matrix_range<const matrix<double> >(m, bottom, right)));

    return quadrants;
}
///Unchop merges the 4 sub-matrices produced by Chop back into one matrix
const DenseMatrix<double> Unchop(
    const std::vector<DenseMatrix<double> >& v)
{
    //Chop returns a std::vector of sub-matrices
    //[ A at [0] B at [1] ]
    //[ C at [2] D at [3] ]
    using boost::numeric::ublas::range;
    using boost::numeric::ublas::matrix_range;

    // The four blocks must form a consistent 2x2 tiling
    assert(v.size() == 4);
    assert(v[0].size1() == v[1].size1());
    assert(v[2].size1() == v[3].size1());
    assert(v[0].size2() == v[2].size2());
    assert(v[1].size2() == v[3].size2());

    DenseMatrix<double> m(v[0].size1() + v[2].size1(),v[0].size2() + v[1].size2());

    for (int quadrant=0; quadrant!=4; ++quadrant)
    {
        const DenseMatrix<double>& w = v[quadrant];
        const std::size_t n_rows = v[quadrant].size1();
        const std::size_t n_cols = v[quadrant].size2();
        // Right-hand quadrants (1, 3) are shifted by A's width; bottom
        // quadrants (2, 3) by A's height.
        const int offset_x = quadrant % 2 ? v[0].size2() : 0;
        const int offset_y = quadrant / 2 ? v[0].size1() : 0;

        for (std::size_t row=0; row!=n_rows; ++row)
        {
            for (std::size_t col=0; col!=n_cols; ++col)
            {
                m(offset_y + row, offset_x + col) = w(row,col);
            }
        }
    }

    // Sanity check: the tiling exactly covers m
    assert(v[0].size1() + v[2].size1() == m.size1());
    assert(v[1].size1() + v[3].size1() == m.size1());
    assert(v[0].size2() + v[1].size2() == m.size2());
    assert(v[2].size2() + v[3].size2() == m.size2());

    return m;
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
///@}
///@name Member r_variables
///@{

// Thread partition over the local-mesh elements, built by
// OpenMPUtils::CreatePartition: entry k is the index of the first element of
// thread k's range, so thread k owns [mElementsPartition[k],
// mElementsPartition[k + 1]) — see GetElementPartitionBegin/End.
DenseVector<unsigned int> mElementsPartition;

///@}
///@name Un accessible methods
///@{
// Returns the spread (max - min) of the entries of the given vector, or 0.0
// for an empty vector.
//
// Bug fix: the loop previously read the member mPressures instead of the
// 'vector' parameter, so the result was wrong for any argument other than
// mPressures (the one visible caller happens to pass mPressures, which is why
// this went unnoticed) — and it indexed mPressures out of bounds whenever the
// argument was longer. It now uses the parameter throughout, and guards
// against empty input instead of reading vector[0] unconditionally.
double GetRangeWithinVector(const std::vector<double>& vector)
{
    if (vector.empty()){
        return 0.0;
    }

    double min = vector[0];
    double max = vector[0];

    for (unsigned int i = 1; i < vector.size(); ++i){
        min = std::min(min, vector[i]);
        max = std::max(max, vector[i]);
    }

    return (max - min);
}
// Accessor for the element partition table built by
// OpenMPUtils::CreatePartition (see mElementsPartition).
DenseVector<unsigned int>& GetElementPartition()
{
    return mElementsPartition;
}
// Iterator to the first local-mesh element assigned to thread k.
// Precondition: mElementsPartition was filled by CreatePartition beforehand.
ElementIterator GetElementPartitionBegin(ModelPart& r_model_part, unsigned int k)
{
    return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k];
}
// Past-the-end iterator of thread k's element range (the begin of thread
// k + 1's range).
ElementIterator GetElementPartitionEnd(ModelPart& r_model_part, unsigned int k)
{
    return r_model_part.GetCommunicator().LocalMesh().Elements().ptr_begin() + mElementsPartition[k + 1];
}
//**************************************************************************************************************************************************
//**************************************************************************************************************************************************
}; // Class CustomFunctionsCalculator
} // namespace Kratos.
#endif // KRATOS_CREATE_AND_DESTROY defined
|
// NOTE(review): the three sections below all write localX without any
// synchronization, which is a data race — the final value of x depends on
// which section happens to execute last (10, 20 or 30). The file reads like a
// data-race benchmark kernel, so the race is documented here rather than
// "fixed". (Also removed: stray filename residue that was fused onto the
// first line and broke compilation.)
int main() {
  int x = 10;
  int localX = 5;  // initial value is dead: every section overwrites it

#pragma omp parallel sections
  {
#pragma omp section
    {
      localX = x;        // race: unsynchronized write to localX
    }
#pragma omp section
    {
      localX = x + 10;   // race: unsynchronized write to localX
    }
#pragma omp section
    {
      localX = x + 20;   // race: unsynchronized write to localX
    }
  }

  x = localX;  // nondeterministic: 10, 20 or 30
  return 0;
}
|
GB_binop__islt_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// Per-operator configuration: the macros below specialize the generic GB_*
// kernel templates (#included by the functions further down) for the ISLT
// operator on int64 operands. Auto-generated — do not hand-edit.

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int64)
// A*D function (colscale): GB (_AxD__islt_int64)
// D*A function (rowscale): GB (_DxB__islt_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int64)
// C=scalar+B GB (_bind1st__islt_int64)
// C=scalar+B' GB (_bind1st_tran__islt_int64)
// C=A+scalar GB (_bind2nd__islt_int64)
// C=A'+scalar GB (_bind2nd_tran__islt_int64)

// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_INT64 || GxB_NO_ISLT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out (#if 0) for this operator: per the comment below, the dense
// ewise3-accum kernel exists only for accumulable ops, and ISLT is not one.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Dense ewise3 without accumulation; the loop body lives in the shared
// template, specialized by the GB_* macros above. Returns GrB_NO_VALUE when
// this operator is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B has been sliced into B_ntasks tasks (B_ek_slicing) over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the braced block above always returns;
    // harmless artifact of the code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx aliases C's numeric values; the meta template fills it in
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx aliases C's numeric values; the meta template fills it in
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__islt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // scratch slicing workspaces; allocated/used inside the template and
    // released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__islt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for this operator family, so only the unflipped branch
// below is compiled in; the flip machinery is kept by the code generator for
// operators that need it.
GrB_Info GB (_AemultB_02__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__islt_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for each of the anz entries; Bb is B's bitmap
// (GBB treats a NULL bitmap as "all entries present", i.e. a full matrix).
GrB_Info GB (_bind1st__islt_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for each of the anz entries; Ab is A's bitmap
// (GBB treats a NULL bitmap as "all entries present", i.e. a full matrix).
GrB_Info GB (_bind2nd__islt_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code compiled after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ann.c | // bahri abacı ann library
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "private/iocore_private.h" // for getopt
#include "private/mlcore_private.h"
// return the input size of the given layer #l: the first layer is fed
// directly by the feature vector, every other layer by the previous layer
uint32_t get_input_size(struct ann_t *model, uint32_t layer)
{
    return (layer == 0) ? model->num_feature : model->layer[layer - 1]->num_neuron;
}
// return the output size of the given layer #l: the last layer produces the
// class scores, every other layer feeds the next layer's neurons
uint32_t get_output_size(struct ann_t *model, uint32_t layer)
{
    return (layer == model->num_layer - 1) ? model->num_class : model->layer[layer + 1]->num_neuron;
}
// create a single fully-connected layer with `num_neuron` neurons, each with
// `num_input` weights plus one bias term (stored at index num_input);
// `layer_type` selects the activation: 'L' logistic, 'T' tanh, 'R' relu
// (anything else falls back to logistic); returns NULL if the layer struct
// itself cannot be allocated
struct layer_t *layer_create(uint32_t num_input, uint32_t num_neuron, char layer_type)
{
    // create a single layer to return
    struct layer_t *out = (struct layer_t *)calloc(1, sizeof(struct layer_t));
    if (out == NULL)
    {
        // fix: the original dereferenced `out` unconditionally, crashing on OOM
        return NULL;
    }
    out->num_neuron = num_neuron;
    out->type = layer_type;
    // set the activation function and its derivative wrt given choice
    // logistic function
    if (out->type == 'L')
    {
        out->activation = &sigmoid;
        out->derivative = &sigmoid_d;
    }
    // tangent hyperbolic function
    else if (out->type == 'T')
    {
        out->activation = &tanhyper;
        out->derivative = &tanhyper_d;
    }
    // rectified linear unit function
    else if (out->type == 'R')
    {
        out->activation = &relu;
        out->derivative = &relu_d;
    }
    // default value is logistic function
    else
    {
        out->activation = &sigmoid;
        out->derivative = &sigmoid_d;
    }
    // we allocate all the spaces that we need
    out->weights = (float **)calloc(out->num_neuron, sizeof(float *));
    out->net_inputs = (float *)calloc(out->num_neuron, sizeof(float));
    out->outputs = (float *)calloc(out->num_neuron, sizeof(float));
    // get space for the weights of each neuron
    // fix: uint32_t counters — the original compared signed int against the
    // uint32_t sizes (signed/unsigned mismatch)
    uint32_t i, j;
    for (i = 0; i < out->num_neuron; i++)
    {
        out->weights[i] = (float *)calloc(num_input + 1, sizeof(float));
        // if the space was allocated, fill the weights
        if (out->weights[i] != NULL)
        {
            // fill weights (and bias) with random numbers in [-1, 1]
            for (j = 0; j < num_input + 1; j++)
            {
                out->weights[i][j] = random_float(-1, 1);
            }
        }
        // NOTE(review): a failed row allocation is still silently skipped;
        // downstream code does not check for NULL weight rows
    }
    return out;
}
// create function for the ann_t class; `layers` is a dash-separated list of
// "<neurons><type>" tokens parsed with strtok on '-',
// e.g. ann_create(ANN_OPTIMIZER_ADAM, "5L-3T-5S", options)
// (the original comment showed commas, which the parser never accepted)
struct ann_t *ann_create(uint32_t optimizer, const char *layers, char options[])
{
    // allocate memory for the output network
    struct ann_t *model = (struct ann_t *)malloc(sizeof(struct ann_t));
    // get the optimizer type
    model->optimizer = optimizer;
    // set the default values for the parameters
    model->max_iter = 1000;
    model->batch_size = 1;
    model->epsilon = 0.001f;
    model->momentum = 0.0f;
    model->eta = 0.001f;
    model->rho = 0.9f;
    model->beta = 0.9f;
    // override the defaults from the options string
    getopt(uint32_t, options, "max_iter", &model->max_iter);
    getopt(uint32_t, options, "batch_size", &model->batch_size);
    getopt(float, options, "eta", &model->eta);
    getopt(float, options, "momentum", &model->momentum);
    getopt(float, options, "epsilon", &model->epsilon);
    getopt(float, options, "rho", &model->rho);
    getopt(float, options, "beta", &model->beta);
    // allocate enough space for the layer type and number of neurons
    // (strlen(layers) is a safe upper bound on the token count)
    model->layer_type = (char *)calloc(strlen(layers), sizeof(char));
    model->num_neuron = (uint32_t *)calloc(strlen(layers), sizeof(uint32_t));
    // layers themselves are created later, in ann_train, once the input size
    // is known
    model->num_feature = 0;
    model->num_class = 0;
    model->num_layer = 0;
    model->layer = NULL;
    // parse the layer descriptors
    char *layer_start = imlab_strdup(layers);
    char *layer = strtok(layer_start, "-");
    while (layer != NULL)
    {
        // fix: "%u" instead of "%d" — num_neuron is uint32_t, so scanning it
        // with a signed conversion specifier was undefined behavior
        int n = sscanf(layer, "%u%c", &model->num_neuron[model->num_layer], &model->layer_type[model->num_layer]);
        if (n == 2)
        {
            model->num_layer++;
        }
        else
        {
            message(WARNING, "incorrect layer descriptor");
        }
        layer = strtok(NULL, "-");
    }
    free(layer_start);
    if (model->num_layer == 0)
    {
        message(WARNING, "incorrect layer size");
    }
    // allocate if necessary
    else
    {
        model->layer = (struct layer_t **)calloc(model->num_layer, sizeof(struct layer_t *));
    }
    // return the model
    return model;
}
// print the information about the model: the configured hyper-parameters,
// the hidden layer layout, and (if ann_train has run) the I/O dimensions
void ann_view(struct ann_t *model)
{
    printf("Parameters of the Artificial Neural Network\n");
    printf("Options:\n");
    // NOTE(review): these fields are uint32_t but printed with %d; correct
    // for values < 2^31 — confirm before switching the formats to %u
    printf("> Optimizer : %d\n", model->optimizer);
    printf("> MaxIter : %d\n", model->max_iter);
    printf("> Batch Size : %d\n", model->batch_size);
    printf("> Eta : %3.5f\n", model->eta);
    printf("> Momentum : %3.5f\n", model->momentum);
    printf("> Epsilon : %3.5f\n", model->epsilon);
    printf("> Rho : %3.5f\n", model->rho);
    printf("> Beta : %3.5f\n", model->beta);
    // print the layer configuration
    if (model->num_layer > 1)
    {
        printf("> Hidden Layer : %d\n", model->num_layer - 1);
        int i;
        for (i = 0; i < model->num_layer - 1; i++)
        {
            printf(" %d. Layer (%c) has %d neurons\n", i + 1, model->layer_type[i], model->num_neuron[i]);
        }
        printf("\n");
    }
    // num_feature is only set by ann_train, so zero means "not trained yet"
    if (model->num_feature == 0)
    {
        return;
    }
    printf("Training results:\n");
    printf("> #Input Feature : %d\n", model->num_feature);
    printf("> #Output Class : %d\n", model->num_class);
}
// train the network with the given inputs and net configurations using
// mini-batch backpropagation; the optimizer field of `model` selects between
// plain momentum SGD, RMSProp, AdaDelta and Adam.  `input` is numSample x
// num_feature, `output` is numSample x num_class (one row per sample).
// Returns SUCCESS on completion, or the error raised by a check_* macro.
return_t ann_train(matrix_t *input, matrix_t *output, struct ann_t *model)
{
    int cond0 = is_numeric(input) && is_numeric(output);
    check_condition(cond0, ERROR_TYPE_MISMATCH, "input must be a numeric matrix");
    int cond1 = rows(input) == rows(output);
    check_condition(cond1, ERROR_DIMENSION_MISMATCH, "input and label matrice have different number of samples");
    int cond2 = channels(input) == 1 && channels(output) == 1;
    check_condition(cond2, ERROR_DIMENSION_MISMATCH, "arguments must be one-dimensional matrice");
    int cond3 = model->num_layer > 0;
    check_condition(cond3, ERROR_DIMENSION_MISMATCH, "layer size must be positive");
    // we are ready to update the model
    uint32_t numSample = rows(input);
    model->num_feature = cols(input);
    model->num_class = cols(output);
    // check that the given model can be used
    int cond4 = model->num_neuron[model->num_layer - 1] == model->num_class;
    check_condition(cond4, ERROR_DIMENSION_MISMATCH, "number of output neurons doesnt match the number of output classes");
    // before everything we will compute the number of inputs for the layers
    // and allocate space for the computations; the output of the input layer
    // is the input of the first layer
    uint32_t iter, batchiter = 0, b, i, j, l; // iteration, batch iteration(k), batch element, num neuron, num inputs, layer
    for (l = 0; l < model->num_layer; l++)
    {
        model->layer[l] = layer_create(get_input_size(model, l), model->num_neuron[l], model->layer_type[l]);
    }
    // allocate memory for the backpropagation deltas (one per neuron)
    float **delta = (float **)calloc(model->num_layer, sizeof(float *));
    for (l = 0; l < model->num_layer; l++)
    {
        delta[l] = (float *)calloc(model->layer[l]->num_neuron, sizeof(float));
    }
    // allocate memory to store the weight changes; auxptr1/auxptr2 hold the
    // per-weight optimizer state (moment estimates / momentum terms)
    float ***gradient = (float ***)calloc(model->num_layer, sizeof(float **));
    float ***auxptr1 = (float ***)calloc(model->num_layer, sizeof(float **));
    float ***auxptr2 = (float ***)calloc(model->num_layer, sizeof(float **));
    for (l = 0; l < model->num_layer; l++)
    {
        gradient[l] = (float **)calloc(model->layer[l]->num_neuron, sizeof(float *));
        auxptr1[l] = (float **)calloc(model->layer[l]->num_neuron, sizeof(float *));
        auxptr2[l] = (float **)calloc(model->layer[l]->num_neuron, sizeof(float *));
        // allocate weight update array for each input (+1 for the bias)
        for (i = 0; i < model->layer[l]->num_neuron; i++)
        {
            gradient[l][i] = (float *)calloc(get_input_size(model, l) + 1, sizeof(float));
            auxptr1[l][i] = (float *)calloc(get_input_size(model, l) + 1, sizeof(float));
            auxptr2[l][i] = (float *)calloc(get_input_size(model, l) + 1, sizeof(float));
        }
    }
    // start iterations
    for (iter = 0; iter < model->max_iter; iter++)
    {
        // shuffle the dataset
        uint32_t *idx = random_permutation(numSample);
        // set the remaining data size
        uint32_t remainingSampleSize = numSample;
        uint32_t sampleIndexFromStart = 0;
        while (remainingSampleSize != 0)
        {
            // find the current batch size (last batch may be smaller)
            uint32_t batchSize = remainingSampleSize > model->batch_size ? model->batch_size : remainingSampleSize;
            // for all training samples in the batch
            for (b = 0; b < batchSize; b++)
            {
                // pick the next shuffled sample
                uint32_t s = idx[b + sampleIndexFromStart];
                // get pointer to the actual input and output values
                float *inputs = data(float, input, s, 0);
                float *target = data(float, output, s, 0);
                // FEED FORWARD
                // feed forward based on that input
                for (l = 0; l < model->num_layer; l++)
                {
                    uint32_t input_size = get_input_size(model, l);
                    // for each neuron in the hidden layer
                    //#pragma omp parallel for private(j)
                    for (i = 0; i < model->layer[l]->num_neuron; i++)
                    {
                        // compute the neuron net input (bias is the last weight)
                        double neuron_signal = model->layer[l]->weights[i][input_size];
                        //#pragma omp parallel for reduction(+:neuron_signal)
                        for (j = 0; j < input_size; j++)
                        {
                            neuron_signal += model->layer[l]->weights[i][j] * inputs[j];
                        }
                        // set the net input and outputs for the current layer
                        model->layer[l]->net_inputs[i] = neuron_signal;
                        model->layer[l]->outputs[i] = model->layer[l]->activation(neuron_signal);
                    }
                    // set the inputs of the next layer as the output of the current layer
                    inputs = model->layer[l]->outputs;
                }
                // BACKPROPAGATION
                // accumulate the gradient changes over the whole network,
                // walking the layers from output to input
                for (l = model->num_layer; l-- > 0;)
                {
                    uint32_t input_size = get_input_size(model, l);
                    uint32_t output_size = get_output_size(model, l);
                    // if the layer is the last layer, then compute the error
                    if (l == model->num_layer - 1)
                    {
                        // for each neuron in the output layer
                        //#pragma omp parallel for
                        for (i = 0; i < model->layer[l]->num_neuron; i++)
                        {
                            delta[l][i] = model->layer[l]->derivative(model->layer[l]->net_inputs[i]) * (model->layer[l]->outputs[i] - target[i]);
                        }
                    }
                    // propagate the output error to the hidden layers
                    else
                    {
                        // for each neuron in the hidden layer
                        //#pragma omp parallel for private(j)
                        for (i = 0; i < model->layer[l]->num_neuron; i++)
                        {
                            // compute the effect of ith neuron on the output error
                            double neuron_error = 0;
                            //#pragma omp parallel for reduction(+:neuron_error)
                            for (j = 0; j < output_size; j++)
                            {
                                neuron_error += model->layer[l + 1]->weights[j][i] * delta[l + 1][j];
                            }
                            // compute the gradient
                            delta[l][i] = model->layer[l]->derivative(model->layer[l]->net_inputs[i]) * neuron_error;
                        }
                    }
                    // set the inputs for the weight update
                    inputs = l == 0 ? data(float, input, s, 0) : model->layer[l - 1]->outputs;
                    // update the gradients
                    for (j = 0; j < model->layer[l]->num_neuron; j++)
                    {
                        // find the sum of gradient over the current batch (weights)
                        for (i = 0; i < input_size; i++)
                        {
                            gradient[l][j][i] += delta[l][j] * inputs[i];
                        }
                        // find the sum of gradient over the current batch (bias)
                        gradient[l][j][input_size] += delta[l][j];
                    }
                }
            }
            // increase the number of batch iterations (Adam bias correction)
            ++batchiter;
            // now update the weights using the gradient changes over the all network
            for (l = 0; l < model->num_layer; l++)
            {
                uint32_t input_size = get_input_size(model, l);
                uint32_t output_size = get_output_size(model, l);
                // update the weights using rmsprop
                if (model->optimizer == ANN_OPTIMIZER_RMSPROP)
                {
                    for (j = 0; j < model->layer[l]->num_neuron; j++)
                    {
                        // update the weights
                        for (i = 0; i < input_size + 1; i++)
                        {
                            // compute the moving average second moments of the gradient for RMSProp algorithms
                            auxptr1[l][j][i] = model->rho * auxptr1[l][j][i] + (1-model->rho) * gradient[l][j][i] * gradient[l][j][i];
                            float rms = sqrt(auxptr1[l][j][i]) + model->epsilon;
                            // compute the descent direction using momentum and RMSprop
                            auxptr2[l][j][i] = model->momentum * auxptr2[l][j][i] + (1 - model->momentum) * gradient[l][j][i] / rms;
                            // update the weights
                            model->layer[l]->weights[j][i] -= model->eta * auxptr2[l][j][i];
                            // set the batch sum to zero
                            gradient[l][j][i] = 0;
                        }
                    }
                }
                // update the weights using adadelta
                // Paper: ADADELTA: AN ADAPTIVE LEARNING RATE METHOD
                else if (model->optimizer == ANN_OPTIMIZER_ADADELTA)
                {
                    for (j = 0; j < model->layer[l]->num_neuron; j++)
                    {
                        // update the weights
                        for (i = 0; i < input_size + 1; i++)
                        {
                            // compute the moving average second moments of the gradient for ADADELTA algorithms
                            auxptr1[l][j][i] = model->rho * auxptr1[l][j][i] + (1-model->rho) * gradient[l][j][i] * gradient[l][j][i];
                            float rmsDg = sqrt(auxptr1[l][j][i]) + model->epsilon;
                            float rmsDx = sqrt(auxptr2[l][j][i]) + model->epsilon;
                            // compute the update
                            float deltaW = rmsDx * gradient[l][j][i] / rmsDg;
                            // compute the moving average second moments of the deltaX
                            auxptr2[l][j][i] = model->rho * auxptr2[l][j][i] + (1-model->rho) * deltaW * deltaW;
                            // update the weights
                            model->layer[l]->weights[j][i] -= deltaW;
                            // set the batch sum to zero
                            gradient[l][j][i] = 0;
                        }
                    }
                }
                // update the weights using ADAM
                // Paper: ADAM: A METHOD FOR STOCHASTIC OPTIMIZATION
                else if (model->optimizer == ANN_OPTIMIZER_ADAM)
                {
                    // bias-correction terms for the first/second moments
                    float rhoNormalizer = (1 - powf(model->rho, batchiter));
                    float betaNormalizer = (1 - powf(model->beta, batchiter));
                    for (j = 0; j < model->layer[l]->num_neuron; j++)
                    {
                        // update the weights
                        for (i = 0; i < input_size + 1; i++)
                        {
                            // compute the moving average moments of the gradient
                            auxptr1[l][j][i] = model->rho * auxptr1[l][j][i] + (1-model->rho) * gradient[l][j][i];
                            auxptr2[l][j][i] = model->beta * auxptr2[l][j][i] + (1-model->beta) * gradient[l][j][i] * gradient[l][j][i];
                            float mhi = auxptr1[l][j][i] / rhoNormalizer;
                            float vhi = sqrt(auxptr2[l][j][i] / betaNormalizer) + model->epsilon;
                            // update the weights
                            model->layer[l]->weights[j][i] -= model->eta * mhi / vhi;
                            // set the batch sum to zero
                            gradient[l][j][i] = 0;
                        }
                    }
                }
                // update the weights using momentum
                else
                {
                    for (j = 0; j < model->layer[l]->num_neuron; j++)
                    {
                        // update the weights
                        for (i = 0; i < input_size + 1; i++)
                        {
                            // compute the descent direction using momentum
                            auxptr1[l][j][i] = model->momentum * auxptr1[l][j][i] + (1 - model->momentum) * gradient[l][j][i];
                            // update the weights
                            model->layer[l]->weights[j][i] -= model->eta * auxptr1[l][j][i];
                            // set the batch sum to zero
                            gradient[l][j][i] = 0;
                        }
                    }
                }
            }
            // update the remaining sample size for the next iterations
            remainingSampleSize -= batchSize;
            sampleIndexFromStart += batchSize;
        }
        // free shuffled indices
        free(idx);
    }
    // TRAINING IS DONE
    // free all the variables allocated for training
    // free memory allocated for delta
    for (l = 0; l < model->num_layer; l++)
    {
        free(delta[l]);
    }
    free(delta);
    // free the weight update arrays
    for (l = 0; l < model->num_layer; l++)
    {
        for (i = 0; i < model->layer[l]->num_neuron; i++)
        {
            free(gradient[l][i]);
            free(auxptr1[l][i]);
            free(auxptr2[l][i]);
        }
        free(gradient[l]);
        free(auxptr1[l]);
        free(auxptr2[l]);
    }
    free(gradient);
    free(auxptr1);
    free(auxptr2);
    // fix: the original fell off the end of a non-void function, which is
    // undefined behavior when the caller inspects the return value
    return SUCCESS;
}
// run the trained network forward on every row of `input` and write the
// class scores into `output` (resized to rows(input) x num_class);
// returns SUCCESS, or the error raised by a check_* macro on bad arguments
return_t ann_predict(matrix_t *input, matrix_t *output, struct ann_t *model)
{
    check_null(model);
    int cond0 = cols(input) == model->num_feature;
    check_condition(cond0, ERROR, "given model has not been trained for the current input");
    int cond1 = is_numeric(input) && is_numeric(output);
    check_condition(cond1, ERROR_TYPE_MISMATCH, "input must be a numeric matrix");
    int cond2 = channels(input) == 1;
    check_condition(cond2, ERROR_DIMENSION_MISMATCH, "input must be one-dimensional matrix");
    // resize the ouput matrix
    matrix_resize(output, rows(input), model->num_class, 1);
    // start iterations here
    int s, l, i, j;
    // for all samples
    for (s = 0; s < rows(input); s++)
    {
        // feed forward based on that input
        float *inputs = data(float, input, s, 0);
        float *outputs = data(float, output, s, 0);
        for (l = 0; l < model->num_layer; l++)
        {
            uint32_t input_size = get_input_size(model, l);
            // for each neuron in the hidden layer
            for (i = 0; i < model->layer[l]->num_neuron; i++)
            {
                // compute the neuron net input (bias is the last weight)
                double neuron_signal = model->layer[l]->weights[i][input_size];
                for (j = 0; j < input_size; j++)
                {
                    neuron_signal += model->layer[l]->weights[i][j] * inputs[j];
                }
                // set the net input and outputs for the current layer
                model->layer[l]->net_inputs[i] = neuron_signal;
                model->layer[l]->outputs[i] = model->layer[l]->activation(neuron_signal);
            }
            // set the inputs of the next layer as the output of the current layer
            inputs = model->layer[l]->outputs;
        }
        // here the inputs array keeps the pointer to the predicted values
        memcpy(outputs, inputs, model->num_class * sizeof(float));
    }
    // fix: the original fell off the end of a non-void function, which is
    // undefined behavior when the caller inspects the return value
    return SUCCESS;
}
/// struct ann_t model import/export functions
// deserialize a model previously written by ann_write; returns NULL when the
// file cannot be opened, otherwise a freshly allocated network
struct ann_t *ann_read(const char *filename)
{
    // open the file with the given key value pair
    FILE *fp = imlab_fopen(filename, "rb", "ann");
    check_file(fp, NULL);
    // create input net
    struct ann_t *net = (struct ann_t *)calloc(1, sizeof(struct ann_t));
    // now read the options
    // NOTE(review): fread return values are unchecked throughout — a short
    // or corrupt file yields an uninitialized/partial model; confirm whether
    // the imlab_fopen magic-header check is considered sufficient
    fread(&net->optimizer, sizeof(uint32_t), 1, fp);
    fread(&net->max_iter, sizeof(uint32_t), 1, fp);
    fread(&net->batch_size, sizeof(uint32_t), 1, fp);
    fread(&net->epsilon, sizeof(float), 1, fp);
    fread(&net->eta, sizeof(float), 1, fp);
    fread(&net->momentum, sizeof(float), 1, fp);
    fread(&net->rho, sizeof(float), 1, fp);
    fread(&net->beta, sizeof(float), 1, fp);
    // read the array sizes
    fread(&net->num_feature, sizeof(uint32_t), 1, fp);
    fread(&net->num_class, sizeof(uint32_t), 1, fp);
    fread(&net->num_layer, sizeof(uint32_t), 1, fp);
    // allocate spaces
    if (net->num_layer > 0)
    {
        net->layer_type = (char *)calloc(net->num_layer, sizeof(char));
        net->num_neuron = (uint32_t *)calloc(net->num_layer, sizeof(uint32_t));
        net->layer = (struct layer_t **)calloc(net->num_layer, sizeof(struct layer_t *));
        // now read the number of neurons and layer types
        fread(net->layer_type, sizeof(char), net->num_layer, fp);
        fread(net->num_neuron, sizeof(uint32_t), net->num_layer, fp);
        // read the layers
        uint32_t l, i;
        for (l = 0; l < net->num_layer; l++)
        {
            uint32_t input_size = get_input_size(net, l);
            // create a layer (weights are randomized, then overwritten below)
            net->layer[l] = layer_create(input_size, net->num_neuron[l], net->layer_type[l]);
            for (i = 0; i < net->layer[l]->num_neuron; i++)
            {
                // input_size weights plus one bias per neuron
                fread(net->layer[l]->weights[i], sizeof(float), input_size + 1, fp);
            }
        }
    }
    else
    {
        net->layer_type = NULL;
        net->num_neuron = NULL;
        net->layer = NULL;
    }
    // close the file
    fclose(fp);
    //done
    return net;
}
// serialize the model to `filename` in the binary layout that ann_read
// expects (options, sizes, layer descriptors, then per-neuron weight rows)
return_t ann_write(struct ann_t *net, const char *filename)
{
    // open the file with the given key value pair
    FILE *fp = imlab_fopen(filename, "wb", "ann");
    check_file(fp);
    // write the options (the original comments said "read" — copy-paste)
    fwrite(&net->optimizer, sizeof(uint32_t), 1, fp);
    fwrite(&net->max_iter, sizeof(uint32_t), 1, fp);
    fwrite(&net->batch_size, sizeof(uint32_t), 1, fp);
    fwrite(&net->epsilon, sizeof(float), 1, fp);
    fwrite(&net->eta, sizeof(float), 1, fp);
    fwrite(&net->momentum, sizeof(float), 1, fp);
    fwrite(&net->rho, sizeof(float), 1, fp);
    fwrite(&net->beta, sizeof(float), 1, fp);
    // write the array sizes
    fwrite(&net->num_feature, sizeof(uint32_t), 1, fp);
    fwrite(&net->num_class, sizeof(uint32_t), 1, fp);
    fwrite(&net->num_layer, sizeof(uint32_t), 1, fp);
    if (net->num_layer > 0)
    {
        // write the number of neurons and layer types
        fwrite(net->layer_type, sizeof(char), net->num_layer, fp);
        fwrite(net->num_neuron, sizeof(uint32_t), net->num_layer, fp);
        // write the layers
        uint32_t l, i;
        for (l = 0; l < net->num_layer; l++)
        {
            uint32_t input_size = get_input_size(net, l);
            for (i = 0; i < net->layer[l]->num_neuron; i++)
            {
                // input_size weights plus one bias per neuron
                fwrite(net->layer[l]->weights[i], sizeof(float), input_size + 1, fp);
            }
        }
    }
    // close the file
    fclose(fp);
    // done return
    return SUCCESS;
}
|
utils.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <mkl.h>
#include <mkl_types.h>
#include "utils.h"
#define PI 3.14159265358979323846
#define CCONST 0.262465831
#define OPSIZE 9
// apply a 4x4 row-major affine operator `op` to the 3-vector `inv`,
// treated as the homogeneous point (x, y, z, 1); only the first three
// rows contribute to the cartesian result written to `out`
void affine_transform(double* out, double* op, double* inv) {
    double h[4] = {inv[0], inv[1], inv[2], 1};
    for (int row = 0; row < 3; row++) {
        out[row] = op[4*row + 0] * h[0] + op[4*row + 1] * h[1]
                 + op[4*row + 2] * h[2] + op[4*row + 3] * h[3];
    }
}
// apply a 3x3 row-major rotation matrix `op` to the 3-vector `inv`
void rotation_transform(double* out, double* op, double* inv) {
    double v[3] = {inv[0], inv[1], inv[2]};
    for (int row = 0; row < 3; row++) {
        out[row] = op[3*row] * v[0] + op[3*row + 1] * v[1] + op[3*row + 2] * v[2];
    }
}
// res = top x bottom (right-handed cross product), cyclic index form
void vcross(double* res, double* top, double* bottom) {
    int nxt[3] = {1, 2, 0};
    for (int d = 0; d < 3; d++) {
        int a = nxt[d], b = nxt[a];
        res[d] = top[a] * bottom[b] - top[b] * bottom[a];
    }
}
// 3-vector dot product
double dot(double* x1, double* x2) {
    double acc = 0.0;
    for (int d = 0; d < 3; d++) {
        acc += x1[d] * x2[d];
    }
    return acc;
}
double mag(double* x1) {
return pow(dot(x1, x1), 0.5);
}
// determinant of a 3x3 row-major matrix, written as the positive and
// negative diagonal sums of the rule of Sarrus
double determinant(double* m) {
    double pos = m[0] * m[4] * m[8]
               + m[1] * m[5] * m[6]
               + m[2] * m[3] * m[7];
    double neg = m[2] * m[4] * m[6]
               + m[1] * m[3] * m[8]
               + m[0] * m[5] * m[7];
    return pos - neg;
}
// minimum-image displacement: scan the 27 periodic images of the fractional
// point `coord` (offsets -1/0/+1 along each axis), convert each candidate
// displacement to cartesian, and keep the shortest one in `path` with its
// length in `r`
void min_cart_path(double* coord, double* center, double* lattice, double* path, double* r) {
    *r = INFINITY;
    for (int di = -1; di <= 1; di++) {
        for (int dj = -1; dj <= 1; dj++) {
            for (int dk = -1; dk <= 1; dk++) {
                double disp[3] = {
                    coord[0] + di - center[0],
                    coord[1] + dj - center[1],
                    coord[2] + dk - center[2]
                };
                frac_to_cartesian(disp, lattice);
                double dist = mag(disp);
                if (dist < *r) {
                    path[0] = disp[0];
                    path[1] = disp[1];
                    path[2] = disp[2];
                    *r = dist;
                }
            }
        }
    }
}
// convert a spherical offset (r, theta, phi) around the ion at `ion_frac`
// into fractional coordinates wrapped to [0, 1); `reclattice` maps the
// cartesian offset back to fractional space
// NOTE(review): the `lattice` argument is unused here, as in the original —
// kept for signature compatibility
void frac_from_spherical(double* ion_frac, double r, double theta, double phi,
double* lattice, double* reclattice, double* result) {
    double cart[3] = {
        r * sin(theta) * cos(phi),
        r * sin(theta) * sin(phi),
        r * cos(theta)
    };
    cartesian_to_frac(cart, reclattice);
    for (int d = 0; d < 3; d++) {
        result[d] = fmod(cart[d] + ion_frac[d], 1.00);
        if (result[d] < 0) result[d] += 1;
    }
}
// gather the 8 grid values of `x` (an fftg[0] x fftg[1] x fftg[2] array,
// k fastest) surrounding the fractional point `frac`, with periodic wrap
// on the upper neighbors; output order: c000, c001, c010, c011, c100,
// c101, c110, c111
void trilinear_interpolate_values(double complex* x, double* frac, int* fftg, double complex* values) {
    int lo[3], hi[3];
    for (int d = 0; d < 3; d++) {
        lo[d] = (int) (frac[d] * fftg[d]);
        hi[d] = (lo[d] + 1) % fftg[d];
    }
    int n = 0;
    for (int a = 0; a < 2; a++) {
        int ia = a ? hi[0] : lo[0];
        for (int b = 0; b < 2; b++) {
            int jb = b ? hi[1] : lo[1];
            for (int c = 0; c < 2; c++) {
                int kc = c ? hi[2] : lo[2];
                values[n++] = x[ia*fftg[1]*fftg[2] + jb*fftg[2] + kc];
            }
        }
    }
}
/* Trilinear interpolation of the 8 corner values c (ordered c000, c001,
 * c010, c011, c100, c101, c110, c111, as produced by
 * trilinear_interpolate_values) at fractional position frac on an FFT
 * grid of dimensions fftg. */
double complex trilinear_interpolate(double complex* c, double* frac, int* fftg) {
	//values: c000, c001, c010, c011, c100, c101, c110, c111
	double d[3];
	/* Offsets of the point inside its grid cell, each in [0, 1). */
	d[0] = fmod(frac[0] * fftg[0], 1.0);
	d[1] = fmod(frac[1] * fftg[1], 1.0);
	d[2] = fmod(frac[2] * fftg[2], 1.0);
	//printf("%lf %lf %lf\n", d[0], d[1], d[2]);
	/* Collapse one axis at a time: first x (d[0]), then y, then z. */
	double complex c00 = c[0] * (1-d[0]) + c[4] * d[0];
	double complex c01 = c[1] * (1-d[0]) + c[5] * d[0];
	double complex c10 = c[2] * (1-d[0]) + c[6] * d[0];
	double complex c11 = c[3] * (1-d[0]) + c[7] * d[0];
	double complex c0 = c00 * (1-d[1]) + c10 * d[1];
	double complex c1 = c01 * (1-d[1]) + c11 * d[1];
	return c0 * (1-d[2]) + c1 * d[2];
}
double dist_from_frac(double* coords1, double* coords2, double* lattice) {
double f1 = fmin(fabs(coords1[0]-coords2[0]), 1-fabs(coords1[0]-coords2[0]));
double f2 = fmin(fabs(coords1[1]-coords2[1]), 1-fabs(coords1[1]-coords2[1]));
double f3 = fmin(fabs(coords1[2]-coords2[2]), 1-fabs(coords1[2]-coords2[2]));
return pow(pow(f1*lattice[0]+f2*lattice[3]+f3*lattice[6], 2)
+ pow(f1*lattice[1]+f2*lattice[4]+f3*lattice[7], 2)
+ pow(f1*lattice[2]+f2*lattice[5]+f3*lattice[8], 2), 0.5);
}
/* In-place convert fractional coordinates to Cartesian: coord is replaced
 * by the linear combination of the three lattice rows weighted by its
 * components (lattice is row-major, rows = lattice vectors). */
void frac_to_cartesian(double* coord, double* lattice) {
    double cart[3];
    for (int d = 0; d < 3; d++)
        cart[d] = coord[0]*lattice[d] + coord[1]*lattice[3+d] + coord[2]*lattice[6+d];
    coord[0] = cart[0];
    coord[1] = cart[1];
    coord[2] = cart[2];
}
/* In-place convert Cartesian coordinates to fractional using the
 * reciprocal lattice (row-major; rows include the 2*PI convention,
 * hence the division below). */
void cartesian_to_frac(double* coord, double* reclattice) {
    double f[3];
    for (int d = 0; d < 3; d++)
        f[d] = (coord[0] * reclattice[3*d]
              + coord[1] * reclattice[3*d+1]
              + coord[2] * reclattice[3*d+2]) / 2 / PI;
    coord[0] = f[0];
    coord[1] = f[1];
    coord[2] = f[2];
}
/* Release an array of num_projs rayleigh_set_t entries, freeing each
 * set's term list and then the array itself. */
void free_rayleigh_set_list(rayleigh_set_t* sets, int num_projs) {
    for (int n = num_projs - 1; n >= 0; n--)
        free(sets[n].terms);
    free(sets);
}
/* Free num projection_t entries (each owning ms/ls/ns/overlaps arrays)
 * and then the list itself. */
void free_projection_list(projection_t* projlist, int num) {
    for (int n = 0; n < num; n++) {
        projection_t* p = &projlist[n];
        free(p->ms);
        free(p->ls);
        free(p->ns);
        free(p->overlaps);
    }
    free(projlist);
}
/* Drop any cached wave_projections on every band of every k-point/spin,
 * freeing them and resetting the pointers to NULL. */
void clean_wave_projections(pswf_t* wf) {
    int nk = wf->nwk * wf->nspin;
    for (int ik = 0; ik < nk; ik++) {
        kpoint_t* kpt = wf->kpts[ik];
        for (int b = 0; b < kpt->num_bands; b++) {
            band_t* band = kpt->bands[b];
            if (band->wave_projections == NULL)
                continue;
            free_projection_list(band->wave_projections, wf->wp_num);
            band->wave_projections = NULL;
        }
    }
}
/* Free a kpoint_t and everything it owns: each band's plane-wave
 * coefficients, cached real-space/augmentation arrays, and projection
 * lists; the per-element Rayleigh expansions; the G-vector list; and the
 * k-vector. num_projs[e] gives the projector count for element e, needed
 * to free that element's expansion list. */
void free_kpoint(kpoint_t* kpt, int num_elems, int num_sites, int wp_num, int* num_projs) {
	for (int b = 0; b < kpt->num_bands; b++) {
		band_t* curr_band = kpt->bands[b];
		free(curr_band->Cs);
		if (curr_band->projections != NULL) {
			free_projection_list(curr_band->projections, num_sites);
		}
		if (curr_band->wave_projections != NULL) {
			free_projection_list(curr_band->wave_projections, wp_num);
		}
		if (curr_band->up_projections != NULL) {
			free_projection_list(curr_band->up_projections, num_sites);
		}
		if (curr_band->down_projections != NULL) {
			free_projection_list(curr_band->down_projections, num_sites);
		}
		/* CRs/CAs come from MKL's allocator, so use mkl_free, not free. */
		if (curr_band->CRs != NULL) {
			mkl_free(curr_band->CRs);
		}
		if (curr_band->CAs != NULL) {
			mkl_free(curr_band->CAs);
		}
		free(curr_band);
	}
	if (kpt->expansion != NULL) {
		for (int i = 0; i < num_elems; i++)
			free_rayleigh_set_list(kpt->expansion[i], num_projs[i]);
		free(kpt->expansion);
	}
	free(kpt->Gs);
	free(kpt->bands);
	free(kpt->k);
	free(kpt);
}
/* Free all arrays owned by one ppot_t: every partial-wave function set
 * (raw arrays plus the 3 spline-coefficient arrays each) and the radial
 * grids and overlap matrices. Does NOT free pp itself — ppot_t values
 * live in an array released by free_ppot_list. */
void free_ppot(ppot_t* pp) {
	for (int i = 0; i < pp->num_projs; i++) {
		free(pp->funcs[i].proj);
		free(pp->funcs[i].pswave);
		free(pp->funcs[i].aewave);
		free(pp->funcs[i].diffwave);
		free(pp->funcs[i].kwave);
		free(pp->funcs[i].smooth_diffwave);
		/* Each spline is 3 coefficient arrays (b, c, d) — see spline_coeff. */
		for (int j = 0; j < 3; j++) {
			free(pp->funcs[i].proj_spline[j]);
			free(pp->funcs[i].aewave_spline[j]);
			free(pp->funcs[i].pswave_spline[j]);
			free(pp->funcs[i].diffwave_spline[j]);
			free(pp->funcs[i].kwave_spline[j]);
			free(pp->funcs[i].smooth_diffwave_spline[j]);
		}
		free(pp->funcs[i].proj_spline);
		free(pp->funcs[i].aewave_spline);
		free(pp->funcs[i].pswave_spline);
		free(pp->funcs[i].diffwave_spline);
		free(pp->funcs[i].kwave_spline);
		free(pp->funcs[i].smooth_diffwave_spline);
	}
	free(pp->funcs);
	free(pp->wave_grid);
	free(pp->kwave_grid);
	free(pp->proj_grid);
	free(pp->smooth_grid);
	free(pp->pspw_overlap_matrix);
	free(pp->aepw_overlap_matrix);
	free(pp->diff_overlap_matrix);
}
/* Release the value buffer owned by a single real_proj_t; the struct
 * itself is freed by its containing site. */
void free_real_proj(real_proj_t* proj) {
	free(proj->values);
}
/* Free a pswf_t wavefunction and everything reachable from it:
 * all k-points (via free_kpoint), augmentation-overlap tables, projector
 * counts, lattices, pseudopotentials, displacement coords, and FFT grid. */
void free_pswf(pswf_t* wf) {
	for (int i = 0; i < wf->nwk * wf->nspin; i++)
		free_kpoint(wf->kpts[i], wf->num_elems, wf->num_sites, wf->wp_num, wf->num_projs);
	if (wf->overlaps != NULL) {
		for (int i = 0; i < wf->num_aug_overlap_sites; i++)
			free(wf->overlaps[i]);
		free(wf->overlaps);
	}
	if (wf->num_projs != NULL) {
		free(wf->num_projs);
	}
	free(wf->kpts);
	free(wf->G_bounds);
	free(wf->lattice);
	free(wf->reclattice);
	if (wf->pps != NULL) {
		free_ppot_list(wf->pps, wf->num_elems);
	}
	if (wf->dcoords != NULL) {
		free(wf->dcoords);
	}
	if (wf->fftg != NULL) {
		free(wf->fftg);
	}
	free(wf);
}
/* Free all resources owned by one real_proj_site_t (projector values,
 * grid indices, coordinates, paths) — but not the struct itself, which
 * typically lives inside an array freed by free_real_proj_site_list. */
void free_real_proj_site(real_proj_site_t* site) {
    for (int n = 0; n < site->total_projs; n++)
        free_real_proj(&site->projs[n]);
    free(site->projs);
    free(site->indices);
    free(site->coord);
    free(site->paths);
}
/* Thin wrapper over free(), exposed so external callers (e.g. Python
 * bindings) can release buffers allocated by this library. */
void free_ptr(void* ptr) {
	free(ptr);
}
/* Free each site in the array, then the array itself. */
void free_real_proj_site_list(real_proj_site_t* sites, int length) {
    for (int n = 0; n < length; n++)
        free_real_proj_site(&sites[n]);
    free(sites);
}
/* Free the contents of each ppot_t in the array, then the array itself. */
void free_ppot_list(ppot_t* pps, int length) {
    for (int n = 0; n < length; n++)
        free_ppot(&pps[n]);
    free(pps);
}
/* Return the smaller of two ints. */
int min(int a, int b) {
    return (a < b) ? a : b;
}
/* Return the larger of two ints. */
int max(int a, int b) {
    return (a > b) ? a : b;
}
/* Return a newly malloc'd array of band occupations over all k-points and
 * spins, laid out band-major: occs[band*NUM_KPTS + kpt], where
 * NUM_KPTS = nwk * nspin. Caller owns (and must free) the result. */
double* get_occs(pswf_t* wf) {
	kpoint_t** kpts = wf->kpts;
	double* occs = (double*) malloc(wf->nwk*wf->nband*wf->nspin*sizeof(double));
	CHECK_ALLOCATION(occs);
	int NUM_KPTS = wf->nwk * wf->nspin;
	for (int kpt_num = 0; kpt_num < NUM_KPTS; kpt_num++) {
		for (int band_num = 0; band_num < wf->nband; band_num++) {
			/* Note the transposed (band-major) layout relative to kpts. */
			occs[band_num*NUM_KPTS+kpt_num] = kpts[kpt_num]->bands[band_num]->occ;
		}
	}
	return occs;
}
/* Number of bands per k-point. */
int get_nband(pswf_t* wf) {
	return wf->nband;
}
/* Number of k-points (per spin channel). */
int get_nwk(pswf_t* wf) {
	return wf->nwk;
}
/* Number of spin channels (1 or 2). */
int get_nspin(pswf_t* wf) {
	return wf->nspin;
}
/* Plane-wave energy cutoff of the wavefunction. */
double get_encut(pswf_t* wf) {
	return wf->encut;
}
/* Nonzero if this is a noncollinear (spinor) wavefunction. */
int is_ncl(pswf_t* wf) {
	return wf->is_ncl;
}
/* Band eigenvalue for (band, kpt, spin); spins are stored as consecutive
 * blocks of nwk k-points. */
double get_energy(pswf_t* wf, int band, int kpt, int spin) {
	return wf->kpts[kpt+spin*wf->nwk]->bands[band]->energy;
}
/* Band occupation for (band, kpt, spin); same indexing as get_energy. */
double get_occ(pswf_t* wf, int band, int kpt, int spin) {
	return wf->kpts[kpt+spin*wf->nwk]->bands[band]->occ;
}
/* Record the number of ionic sites associated with this wavefunction. */
void set_num_sites(pswf_t* wf, int nsites) {
	wf->num_sites = nsites;
}
/* Associated Legendre polynomial P_l^m(x), evaluated from its explicit
 * power-series expansion (factorials via fac). Negative m is reduced to
 * positive m through the standard symmetry relation on the first line.
 * NOTE(review): the overall (-1)^m factor's sign convention (Condon-
 * Shortley or not) should be confirmed against what Ylm expects. */
double legendre(int l, int m, double x) {
	double total = 0;
	if (m < 0) return pow(-1.0, m) * fac(l+m) / fac(l-m) * legendre(l, -m, x);
	/* Sum only terms with non-negative power 2n-l-m. */
	for (int n = l; n >= 0 && 2*n-l-m >= 0; n--) {
		total += pow(x, 2*n-l-m) * fac(2*n) / fac(2*n-l-m) / fac(n) / fac(l-n) * pow(-1, l-n);
	}
	return total * pow(-1, m) * pow(1 - x * x, m/2.0) / pow(2, l);
}
/* Fill ptr[n] with the polynomial coefficients of P_l^m (same expansion
 * as legendre()); ptr must be zero-initialized by the caller. Negative m
 * is folded into the prefactor before taking |m|. */
void legendre_coeff(double* ptr, int l, int m) {
	// assumes ptr is cleared
	double prefac = pow(-1, m) / pow(2, l);
	if (m < 0) {
		prefac *= pow(-1.0, m) * fac(l+m) / fac(l-m);
	}
	m = abs(m);
	for (int n = l; n >= 0 && 2*n-l-m >= 0; n--) {
		ptr[n] = fac(2*n) / fac(2*n-l-m) / fac(n) / fac(l-n) * pow(-1, l-n) * prefac;
	}
}
/* Expand the product P_{l1}^{m1} * P_{l2}^{m2} in the basis of
 * P_l^{m2-m1} polynomials by polynomial multiplication followed by
 * Gaussian-elimination-style peeling from the highest degree down.
 * Returns a malloc'd coefficient array of length l1+l2+1 (caller frees). */
double* legendre_product(int l1, int l2, int m1, int m2) {
	int m = m2 - m1;
	int maxl = l1 + l2;
	double* lp1 = (double*) calloc((l1+1), sizeof(double));
	double* lp2 = (double*) calloc((l2+1), sizeof(double));
	double* polynomial = (double*) calloc((maxl+1), sizeof(double));
	double* test = (double*) calloc((maxl+1), sizeof(double));
	double* coeff = (double*) calloc((maxl+1), sizeof(double));
	legendre_coeff(lp1, l1, m1);
	legendre_coeff(lp2, l2, m2);
	/* Multiply the two coefficient polynomials. */
	for (int n1 = 0; n1 <= l1; n1++) {
		for (int n2 = 0; n2 <= l2; n2++) {
			polynomial[n1+n2] += lp1[n1] * lp2[n2];
		}
	}
	/* Subtract multiples of P_l^m from the top degree downward; what is
	 * removed at each step is the coefficient in the Legendre basis. */
	for (int l = maxl; l >= abs(m); l--) {
		legendre_coeff(test, l, m);
		coeff[l] = polynomial[l] / test[l];
		for (int lp = abs(m); lp <= maxl; lp++) {
			polynomial[lp] -= coeff[l] * test[lp];
			test[lp] = 0;
		}
	}
	free(lp1);
	free(lp2);
	free(polynomial);
	free(test);
	return coeff;
}
/* Factorial n! in double precision.
 * Fixed: the accumulator was an int, which overflows (undefined behavior)
 * for n >= 13 even though the function returns double. A double
 * accumulator is exact up to 22! and finite up to ~170!. Returns 1 for
 * n <= 1 (including 0 and negative n, matching the old behavior). */
double fac(int n) {
	double t = 1.0;
	for (int m = 2; m <= n; m++)
		t *= m;
	return t;
}
/* Spherical harmonic Y_l^m(theta, phi): normalization factor times the
 * associated Legendre polynomial in cos(theta) and the azimuthal phase
 * e^{i m phi}. */
double complex Ylm(int l, int m, double theta, double phi) {
	//printf("%lf %lf %lf\n", pow((2*l+1)/(4*PI)*fac(l-m)/fac(l+m), 0.5), legendre(l, m, cos(theta)),
	//	creal(cexp(I*m*phi)));
	//double complex multiplier = 0;
	//if (m == 0) multiplier = 1;
	//else if (m < 0) multiplier = pow(2.0, 0.5) * cos(-m*phi);
	//else multiplier = pow(-1,m) * pow(2.0, 0.5) * sin(m*phi);
	return pow((2*l+1)/(4*PI)*fac(l-m)/fac(l+m), 0.5) *
		legendre(l, m, cos(theta))* cexp(I*m*phi);
}
/* Same as Ylm, but the caller supplies cos(theta) directly — avoids a
 * redundant acos/cos round trip when only the cosine is known. */
double complex Ylm2(int l, int m, double costheta, double phi) {
	//printf("%lf %lf %lf\n", pow((2*l+1)/(4*PI)*fac(l-m)/fac(l+m), 0.5), legendre(l, m, cos(theta)),
	//	creal(cexp(I*m*phi)));
	//double complex multiplier = 0;
	//if (m == 0) multiplier = 1;
	//else if (m < 0) multiplier = pow(2.0, 0.5) * cos(-m*phi);
	//else multiplier = pow(-1,m) * pow(2.0, 0.5) * sin(m*phi);
	return pow((2*l+1)/(4*PI)*fac(l-m)/fac(l+m), 0.5) *
		legendre(l, m, costheta) * cexp(I*m*phi);
}
/* Evaluate a projector sampled on a UNIFORM radial grid x (spacing
 * rmax/size) at radius r using its cubic spline coefficients.
 * Returns 0 beyond the last grid point and the first sample below x[0]. */
double proj_interpolate(double r, double rmax, int size, double* x,
	double* proj, double** proj_spline) {
	if (r > x[size-1]) return 0;
	if (r < x[0]) return proj[0];
	/* Uniform grid: the segment index follows directly from r/rmax. */
	int ind = min((int)(r/rmax*size), size-2);
	double dr = r - x[ind];
	/* Horner evaluation of the cubic segment. */
	return proj[ind] + dr * (proj_spline[0][ind] +
	       dr * (proj_spline[1][ind] +
	       dr * proj_spline[2][ind]));
}
/* Evaluate a radial function sampled on a LOGARITHMIC grid x at radius r
 * using its cubic spline coefficients. Returns 0 beyond the last grid
 * point and the first sample below x[0]. */
double wave_interpolate(double r, int size, double* x, double* f,
	double** wave_spline) {
	if (r > x[size-1]) return 0;
	if (r < x[0]) return f[0];
	/* Log grid: x[i] = x[0] * (x[1]/x[0])^i, so invert with logarithms. */
	int ind = min((int) (log(r/x[0]) / log(x[1]/x[0])), size-2);
	double dr = r - x[ind];
	/* Horner evaluation of the cubic segment. */
	return f[ind] + dr * (wave_spline[0][ind] +
	       dr * (wave_spline[1][ind] +
	       dr * wave_spline[2][ind]));
}
/* Value of the all-electron-minus-pseudo partial wave (AE - PS radial
 * parts times Y_l^m) at point pos, for an ion at ion_pos (both fractional;
 * lattice maps to Cartesian via min_cart_path). The stored radial arrays
 * are r*f(r), hence the division by r (clamped to x[0] near the origin). */
double complex wave_value(funcset_t funcs, int size, double* x, int m,
	double* ion_pos, double* pos, double* lattice) {
	double temp[3] = {0,0,0};
	double r = 0;
	min_cart_path(pos, ion_pos, lattice, temp, &r);
	double ae_radial_val = wave_interpolate(r, size, x, funcs.aewave, funcs.aewave_spline);
	double ps_radial_val = wave_interpolate(r, size, x, funcs.pswave, funcs.pswave_spline);
	double radial_val = (ae_radial_val - ps_radial_val);
	if (r < x[0])
		radial_val /= x[0];
	else
		radial_val /= r;
	if (r==0) return Ylm(funcs.l, m, 0, 0) * radial_val;
	/* Spherical angles of the displacement vector temp. */
	double theta = 0, phi = 0;
	theta = acos(temp[2]/r);
	/* On the z-axis phi is arbitrary; pick 0 to avoid 0/0. */
	if (r - fabs(temp[2]) == 0) phi = 0;
	else phi = acos(temp[0] / pow(temp[0]*temp[0] + temp[1]*temp[1], 0.5));
	if (temp[1] < 0) phi = 2*PI - phi;
	double complex sph_val = Ylm(funcs.l, m, theta, phi);
	return radial_val * sph_val;
}
/* Value of a single radial function times Y_l^m at the CARTESIAN point
 * pos (relative to the ion). The stored radial array is r*f(r), hence the
 * division by r (clamped to x[0] near the origin). */
double complex wave_value2(double* x, double* wave, double** spline, int size,
	int l, int m, double* pos) {
	double r = mag(pos);
	double radial_val = wave_interpolate(r, size, x, wave, spline);
	if (r < x[0])
		radial_val /= x[0];
	else
		radial_val /= r;
	if (r==0) return Ylm(l, m, 0, 0) * radial_val;
	/* Spherical angles of pos. */
	double theta = 0, phi = 0;
	theta = acos(pos[2]/r);
	/* On the z-axis phi is arbitrary; pick 0 to avoid 0/0. */
	if (r - fabs(pos[2]) == 0) phi = 0;
	else phi = acos(pos[0] / pow(pos[0]*pos[0] + pos[1]*pos[1], 0.5));
	if (pos[1] < 0) phi = 2*PI - phi;
	double complex sph_val = Ylm(l, m, theta, phi);
	return radial_val * sph_val;
}
/* Common tail for proj_value/smooth_wave_value: interpolate the radial
 * part f (uniform grid x, spline s) at r and multiply by Y_l^m of the
 * direction of temp. temp is the Cartesian displacement whose length is r. */
double complex proj_value_helper(double r, double rmax, int size,
	double* temp, double* x, double* f, double** s, int l, int m) {
	double radial_val = proj_interpolate(r, rmax, size, x, f, s);
	if (r == 0) return Ylm(l, m, 0, 0) * radial_val;
	double theta = 0, phi = 0;
	theta = acos(temp[2]/r);
	/* On the z-axis phi is arbitrary; pick 0 to avoid 0/0. */
	if (r - fabs(temp[2]) == 0) phi = 0;
	else phi = acos(temp[0] / pow(temp[0]*temp[0] + temp[1]*temp[1], 0.5));
	if (temp[1] < 0) phi = 2*PI - phi;
	double complex sph_val = Ylm(l, m, theta, phi);
	return radial_val * sph_val;
}
/* Projector value at fractional point pos for an ion at ion_pos:
 * minimum-image displacement, then radial-spline times Y_l^m. */
double complex proj_value(funcset_t funcs, double* x, int m, double rmax,
	int size, double* ion_pos, double* pos, double* lattice) {
	double temp[3] = {0,0,0};
	double r = 0;
	min_cart_path(pos, ion_pos, lattice, temp, &r);
	return proj_value_helper(r, rmax, size, temp, x, funcs.proj,
		funcs.proj_spline, funcs.l, m);
}
/* Like proj_value, but evaluates the smoothed AE-PS difference wave
 * (smooth_diffwave) instead of the projector. */
double complex smooth_wave_value(funcset_t funcs, double* x, int m, double rmax,
	int size, double* ion_pos, double* pos, double* lattice) {
	double temp[3] = {0,0,0};
	double r = 0;
	min_cart_path(pos, ion_pos, lattice, temp, &r);
	return proj_value_helper(r, rmax, size, temp, x, funcs.smooth_diffwave,
		funcs.smooth_diffwave_spline, funcs.l, m);
}
/* Build real-space projector (pr0_pw1 == 0) or smoothed partial-wave
 * (pr0_pw1 == 1) tables for the selected sites. For each site, every FFT
 * grid point within the cutoff radius is recorded (index, displacement
 * path, and the value of each (l, m) channel at that point).
 * Fixes vs. previous version:
 *  - R0 used pps[labels[s]] for the denominator gridsize; s is the loop
 *    index into the selected-site subset, while p = site_nums[s] is the
 *    actual site, so the element lookup must be labels[p] (as in the
 *    numerator on the same statement).
 *  - Added the missing CHECK_ALLOCATION for sites[s].projs, consistent
 *    with the neighboring allocations. */
void setup_site(real_proj_site_t* sites, ppot_t* pps, int num_sites, int* site_nums,
	int* labels, double* coords, double* lattice, int* fftg, int pr0_pw1) {
	double vol = determinant(lattice);
	/* Pass 1: allocate and fill per-site metadata. */
	for (int s = 0; s < num_sites; s++) {
		int i = site_nums[s];
		sites[s].index = i;
		sites[s].elem = labels[i];
		sites[s].gridsize = pps[labels[i]].proj_gridsize;
		sites[s].num_projs = pps[labels[i]].num_projs;
		if (pr0_pw1) sites[s].rmax = pps[labels[i]].wave_rmax;
		else sites[s].rmax = pps[labels[i]].rmax;
		sites[s].total_projs = pps[labels[i]].total_projs;
		sites[s].num_indices = 0;
		sites[s].coord = malloc(3 * sizeof(double));
		CHECK_ALLOCATION(sites[s].coord);
		sites[s].coord[0] = coords[3*i+0];
		sites[s].coord[1] = coords[3*i+1];
		sites[s].coord[2] = coords[3*i+2];
		sites[s].indices = calloc(pps[labels[i]].num_cart_gridpts, sizeof(int));
		CHECK_ALLOCATION(sites[s].indices);
		sites[s].projs = (real_proj_t*) malloc(sites[s].total_projs * sizeof(real_proj_t));
		CHECK_ALLOCATION(sites[s].projs);
		int p = 0;
		sites[s].paths = malloc(3*pps[labels[i]].num_cart_gridpts * sizeof(double));
		CHECK_ALLOCATION(sites[s].paths);
		/* One real_proj_t per (radial function, m) channel. */
		for (int j = 0; j < sites[s].num_projs; j++) {
			for (int m = -pps[labels[i]].funcs[j].l; m <= pps[labels[i]].funcs[j].l; m++) {
				sites[s].projs[p].l = pps[labels[i]].funcs[j].l;
				sites[s].projs[p].m = m;
				sites[s].projs[p].func_num = j;
				sites[s].projs[p].values = malloc(pps[labels[i]].num_cart_gridpts * sizeof(double complex));
				CHECK_ALLOCATION(sites[s].projs[p].values);
				p++;
			}
		}
	}
	/* Pass 2: walk the FFT grid points inside the cutoff sphere. */
	//#pragma omp parallel for
	for (int s = 0; s < num_sites; s++) {
		int p = site_nums[s];
		double res[3] = {0,0,0};
		double frac[3] = {0,0,0};
		double testcoord[3] = {0,0,0};
		/* Grid half-widths from the inter-plane distances (cross products
		 * of the other two lattice vectors over the cell volume). */
		vcross(res, lattice+3, lattice+6);
		int grid1 = (int) (mag(res) * sites[s].rmax / vol * fftg[0]) + 1;
		vcross(res, lattice+0, lattice+6);
		int grid2 = (int) (mag(res) * sites[s].rmax / vol * fftg[1]) + 1;
		vcross(res, lattice+0, lattice+3);
		int grid3 = (int) (mag(res) * sites[s].rmax / vol * fftg[2]) + 1;
		int center1 = (int) round(coords[3*p+0] * fftg[0]);
		int center2 = (int) round(coords[3*p+1] * fftg[1]);
		int center3 = (int) round(coords[3*p+2] * fftg[2]);
		int ii=0, jj=0, kk=0;
		/* Effective cutoff: last grid point of the radial table.
		 * (Was pps[labels[s]] in the denominator — wrong element lookup.) */
		double R0 = (pps[labels[p]].proj_gridsize-1) * sites[s].rmax
					/ pps[labels[p]].proj_gridsize;
		for (int i = -grid1 + center1; i <= grid1 + center1; i++) {
			for (int j = -grid2 + center2; j <= grid2 + center2; j++) {
				for (int k = -grid3 + center3; k <= grid3 + center3; k++) {
					testcoord[0] = (double) i / fftg[0] - coords[3*p+0];
					testcoord[1] = (double) j / fftg[1] - coords[3*p+1];
					testcoord[2] = (double) k / fftg[2] - coords[3*p+2];
					frac_to_cartesian(testcoord, lattice);
					if (mag(testcoord) < R0) {
						/* Wrap indices back into the periodic grid. */
						ii = (i%fftg[0] + fftg[0]) % fftg[0];
						jj = (j%fftg[1] + fftg[1]) % fftg[1];
						kk = (k%fftg[2] + fftg[2]) % fftg[2];
						frac[0] = (double) ii / fftg[0];
						frac[1] = (double) jj / fftg[1];
						frac[2] = (double) kk / fftg[2];
						sites[s].indices[sites[s].num_indices] = ii*fftg[1]*fftg[2] + jj*fftg[2] + kk;
						sites[s].paths[3*sites[s].num_indices+0] = testcoord[0];
						sites[s].paths[3*sites[s].num_indices+1] = testcoord[1];
						sites[s].paths[3*sites[s].num_indices+2] = testcoord[2];
						for (int n = 0; n < sites[s].total_projs; n++) {
							if (pr0_pw1)
								sites[s].projs[n].values[sites[s].num_indices] = smooth_wave_value(
									pps[labels[p]].funcs[sites[s].projs[n].func_num],
									pps[labels[p]].smooth_grid, sites[s].projs[n].m, sites[s].rmax,
									pps[labels[p]].proj_gridsize, coords+3*p, frac, lattice);
							else
								sites[s].projs[n].values[sites[s].num_indices] = proj_value(
									pps[labels[p]].funcs[sites[s].projs[n].func_num],
									pps[labels[p]].proj_grid, sites[s].projs[n].m, sites[s].rmax,
									pps[labels[p]].proj_gridsize, coords+3*p, frac, lattice);
						}
						sites[s].num_indices++;
						//if (sites[s].num_indices >= pps[labels[p]].num_cart_gridpts)
						//	printf("SETUP_SITE ERROR %d %d %d\n", sites[s].num_indices, p, pr0_pw1);
					}
				}
			}
		}
	}
}
//adapted from VASP source code
/* Compute cubic spline coefficients for samples (x, y) of length N.
 * Returns a malloc'd array of three malloc'd arrays {b, c, d} such that
 * on segment i: y(t) = y[i] + dr*(b[i] + dr*(c[i] + dr*d[i])), dr = t - x[i].
 * Caller owns all four allocations.
 * NOTE(review): d1p1 is set to the first-interval secant slope and then
 * compared against 0.99e30 (the Numerical Recipes "natural spline"
 * sentinel for a prescribed first derivative); as written, the else
 * branch's boundary term is always exactly zero — presumably a natural
 * boundary is intended. Confirm against the VASP original before changing. */
double** spline_coeff(double* x, double* y, int N) {
	double** coeff = (double**) malloc(3 * sizeof(double*));
	CHECK_ALLOCATION(coeff);
	coeff[0] = (double*) malloc(N * sizeof(double));
	coeff[1] = (double*) malloc(N * sizeof(double));
	coeff[2] = (double*) malloc(N * sizeof(double));
	CHECK_ALLOCATION(coeff[0]);
	CHECK_ALLOCATION(coeff[1]);
	CHECK_ALLOCATION(coeff[2]);
	double d1p1 = (y[1] - y[0]) / (x[1] - x[0]);
	if (d1p1 > 0.99E30) {
		coeff[1][0] = 0;
		coeff[0][0] = 0;
	}
	else {
		coeff[1][0] = -0.5;
		coeff[0][0] = (3 / (x[1] - x[0])) * ((y[1] - y[0]) / (x[1] - x[0]) - d1p1);
	}
	/* Forward sweep of the tridiagonal system for the second derivatives. */
	double s = 0, r = 0;
	for (int i = 1; i < N - 1; i++) {
		s = (x[i] - x[i-1]) / (x[i+1] - x[i-1]);
		r = s * coeff[1][i-1] + 2;
		coeff[1][i] = (s - 1) / r;
		coeff[0][i] = (6 * ( (y[i+1] - y[i]) / (x[i+1] - x[i]) -
			(y[i] - y[i-1]) / (x[i] - x[i-1])) /
			(x[i+1] - x[i-1]) - s*coeff[0][i-1]) / r;
	}
	coeff[0][N-1] = 0;
	coeff[1][N-1] = 0;
	coeff[2][N-1] = 0;
	/* Back substitution. */
	for (int i = N-2; i >= 0; i--) {
		coeff[1][i] = coeff[1][i] * coeff[1][i+1] + coeff[0][i];
	}
	/* Convert second derivatives into per-segment polynomial coefficients. */
	for (int i = 0; i < N-1; i++) {
		s = x[i+1] - x[i];
		r = (coeff[1][i+1] - coeff[1][i]) / 6;
		coeff[2][i] = r / s;
		coeff[1][i] = coeff[1][i] / 2;
		coeff[0][i] = (y[i+1]-y[i]) / s - (coeff[1][i] + r) * s;
	}
	return coeff;
}
/* Integrate a cubic spline exactly over its full range: on each segment
 * of width dx, integrate a + b*dr + c*dr^2 + d*dr^3 in closed form.
 * s = {b, c, d} as produced by spline_coeff; a holds the sample values. */
double spline_integral(double* x, double* a, double** s, int size) {
    double total = 0;
    for (int i = 0; i + 1 < size; i++) {
        double dx = x[i+1] - x[i];
        total += dx * (a[i] + dx * (s[0][i]/2 + dx * (s[1][i]/3 + s[2][i]*dx/4)));
    }
    return total;
}
/* Inverse of the flat FFT-grid index ind = i*fftg[1]*fftg[2] + j*fftg[2] + k:
 * recover (i, j, k) and write the fractional coordinate of that grid point. */
void frac_from_index(int index, double* coord, int* fftg) {
    int plane = fftg[1] * fftg[2];
    int i = index / plane;
    int rem = index % plane;
    int j = rem / fftg[2];
    int k = rem % fftg[2];
    coord[0] = ((double) i) / fftg[0];
    coord[1] = ((double) j) / fftg[1];
    coord[2] = ((double) k) / fftg[2];
}
/* Spherical angles of a Cartesian vector: dir[0] = polar angle theta
 * (from +z), dir[1] = azimuthal angle phi in [0, 2*PI).
 * Not defined for the zero vector (division by r == 0). */
void direction(double* cart, double* dir) {
	double theta = 0, phi = 0;
	double r = mag(cart);
	theta = acos(cart[2]/r);
	/* On the z-axis phi is arbitrary; pick 0 to avoid 0/0. */
	if (r - fabs(cart[2]) == 0) phi = 0;
	else phi = acos(cart[0] / pow(cart[0]*cart[0] + cart[1]*cart[1], 0.5));
	if (cart[1] < 0) phi = 2*PI - phi;
	dir[0] = theta;
	dir[1] = phi;
}
double sph_bessel(double k, double r, int l) {
double x = k * r;
if (l == 0)
return sin(x) / x;
else if (l == 1)
return sin(x) / (x*x) - cos(x) / x;
else if (l == 2)
return (3 / (x*x) -1) * sin(x) / x - 3 * cos(x) / (x*x);
else if (l == 3)
return (15 / (x*x*x) - 6 / x) * sin(x) / x - (15 / (x*x) -1) * cos(x) / x;
else {
printf("ERROR: sph_bessel l too high");
return 0;
}
}
double sbf(double x, int l) {
if (x < 10e-6) {
if (l==0) return 1;
else return 0;
}
double jlm1 = sin(x) / x;
double jl = sin(x) / (x*x) - cos(x) / x;
double jlp1 = 0;
if (l == 0) return jlm1;
if (l == 1) return jl;
for (int ll = 1; ll < l; ll++) {
jlp1 = (2*ll+1)/x*jl - jlm1;
jlm1 = jl;
jl = jlp1;
}
return jlp1;
}
/* Expand a wavefunction given on the irreducible k-point set rwf to the
 * full set of num_kpts k-points using symmetry operations.
 * maps[k]: irreducible k-point feeding full k-point k; ops: row-major 3x3
 * rotations (OPSIZE stride); drs: fractional translations; kws: k-point
 * weights; trs[k]: 1 if time reversal must be applied.
 * Returns a newly allocated pswf_t (caller frees with free_pswf). */
pswf_t* expand_symm_wf(pswf_t* rwf, int num_kpts, int* maps,
	double* ops, double* drs, double* kws, int* trs) {
	double* lattice = rwf->lattice;
	double* reclattice = rwf->reclattice;
	pswf_t* wf = (pswf_t*) malloc(sizeof(pswf_t));
	wf->num_elems = rwf->num_elems;
	wf->num_sites = rwf->num_sites;
	wf->pps = NULL;
	/* G_bounds start at zero and are grown while scanning plane waves. */
	wf->G_bounds = (int*) malloc(6*sizeof(int));
	for (int i = 0; i < 6; i++) {
		wf->G_bounds[i] = 0;//rwf->G_bounds[i];
	}
	wf->kpts = (kpoint_t**) malloc(num_kpts * rwf->nspin * sizeof(kpoint_t*));
	wf->nspin = rwf->nspin;
	wf->nband = rwf->nband;
	wf->nwk = num_kpts;
	wf->lattice = (double*) malloc(9*sizeof(double));
	wf->reclattice = (double*) malloc(9*sizeof(double));
	for (int i = 0; i < 9; i++) {
		wf->lattice[i] = lattice[i];
		wf->reclattice[i] = reclattice[i];
	}
	wf->fftg = NULL;
	wf->is_ncl = rwf->is_ncl;
	wf->num_aug_overlap_sites = 0;
	wf->dcoords = NULL;
	wf->overlaps = NULL;
	wf->num_projs = NULL;
	wf->wp_num = 0;
	//#pragma omp parallel for
	for (int knum = 0; knum < num_kpts * wf->nspin; knum++) {
		wf->kpts[knum] = (kpoint_t*) malloc(sizeof(kpoint_t));
		double pw[3] = {0,0,0};
		/* rnum: source irreducible k-point; second spin block offsets by nwk. */
		int rnum = maps[knum%num_kpts];
		int tr = trs[knum%num_kpts];
		if (knum >= num_kpts && rwf->nspin ==2) {
			rnum += rwf->nwk;
		}
		kpoint_t* kpt = wf->kpts[knum];
		kpoint_t* rkpt = rwf->kpts[rnum];
		kpt->up = rkpt->up;
		kpt->num_waves = rkpt->num_waves;
		kpt->k = (double*) malloc(3 * sizeof(double));
		/* Rotate the k-vector, optionally apply time reversal, and wrap it
		 * back to the first Brillouin zone (kdiff records the shift). */
		rotation_transform(kpt->k, ops+OPSIZE*(knum%num_kpts), rkpt->k);
		if (tr == 1) {
			kpt->k[0] *= -1;
			kpt->k[1] *= -1;
			kpt->k[2] *= -1;
		}
		double kdiff[3] = {round(kpt->k[0]), round(kpt->k[1]), round(kpt->k[2])};
		kpt->k[0] -= kdiff[0];
		kpt->k[1] -= kdiff[1];
		kpt->k[2] -= kdiff[2];
		/* Prefer +0.5 over -0.5 at the zone boundary. */
		if (fabs(kpt->k[0] + 0.5) < 0.0001){ kdiff[0] -= 1; kpt->k[0] += 1; }
		if (fabs(kpt->k[1] + 0.5) < 0.0001){ kdiff[1] -= 1; kpt->k[1] += 1; }
		if (fabs(kpt->k[2] + 0.5) < 0.0001){ kdiff[2] -= 1; kpt->k[2] += 1; }
		//printf("OLD KPT %lf %lf %lf\n", rkpt->k[0], rkpt->k[1], rkpt->k[2]);
		//printf("NEW KPT %lf %lf %lf\n", kpt->k[0], kpt->k[1], kpt->k[2]);
		//kpt->Gs = (int*) malloc(3 * kpt->num_waves * sizeof(int));
		kpt->weight = kws[knum%num_kpts];
		kpt->num_bands = rkpt->num_bands;
		kpt->bands = (band_t**) malloc(kpt->num_bands * sizeof(band_t*));
		kpt->expansion = NULL;
		/* Regenerate the G-vector sphere for the new k-vector, matching the
		 * plane-wave selection rule |k+G|^2 / CCONST <= encut. */
		int* igall = malloc(3*kpt->num_waves*sizeof(int));
		if (igall == NULL) {
			ALLOCATION_FAILED();
		}
		int nb1max = rwf->G_bounds[1] - rwf->G_bounds[0] + 2;
		int nb2max = rwf->G_bounds[3] - rwf->G_bounds[2] + 2;
		int nb3max = rwf->G_bounds[5] - rwf->G_bounds[4] + 2;
		double encut = rwf->encut;
		double* b1 = reclattice;
		double* b2 = reclattice+3;
		double* b3 = reclattice+6;
		int ncnt = -1;
		for (int ig3 = 0; ig3 <= 2 * nb3max; ig3++) {
			int ig3p = ig3;
			if (ig3 > nb3max) ig3p = ig3 - 2 * nb3max - 1;
			for (int ig2 = 0; ig2 <= 2 * nb2max; ig2++) {
				int ig2p = ig2;
				if (ig2 > nb2max) ig2p = ig2 - 2 * nb2max - 1;
				for (int ig1 = 0; ig1 <= 2 * nb1max; ig1++) {
					int ig1p = ig1;
					if (ig1 > nb1max) ig1p = ig1 - 2 * nb1max - 1;
					double sumkg[3];
					for (int j = 0; j < 3; j++) {
						sumkg[j] = (kpt->k[0]+ig1p) * b1[j]
								 + (kpt->k[1]+ig2p) * b2[j]
								 + (kpt->k[2]+ig3p) * b3[j];
					}
					double gtot = mag(sumkg);
					double etot = pow(gtot,2.0) / CCONST;
					//printf("%lf %lf\n", etot, gtot);
					if (etot <= encut) {
						ncnt++;
						igall[ncnt*3+0] = ig1p;
						igall[ncnt*3+1] = ig2p;
						igall[ncnt*3+2] = ig3p;
						if (ig1p < wf->G_bounds[0]) wf->G_bounds[0] = ig1p;
						else if (ig1p > wf->G_bounds[1]) wf->G_bounds[1] = ig1p;
						if (ig2p < wf->G_bounds[2]) wf->G_bounds[2] = ig2p;
						else if (ig2p > wf->G_bounds[3]) wf->G_bounds[3] = ig2p;
						if (ig3p < wf->G_bounds[4]) wf->G_bounds[4] = ig3p;
						else if (ig3p > wf->G_bounds[5]) wf->G_bounds[5] = ig3p;
					}
				}
			}
		}
		ncnt++;
		/* Noncollinear case: each G appears twice (two spinor components). */
		if (ncnt * 2 == rkpt->num_waves) {
			printf("This is an NCL wavefunction!\n");
			wf->is_ncl = 1;
			for (int iplane = 0; iplane < rkpt->num_waves/2; iplane++) {
				igall[3*(rkpt->num_waves/2+iplane)+0] = igall[3*iplane+0];
				igall[3*(rkpt->num_waves/2+iplane)+1] = igall[3*iplane+1];
				igall[3*(rkpt->num_waves/2+iplane)+2] = igall[3*iplane+2];
			}
		} else if (ncnt != rkpt->num_waves) {
			printf("ERROR %d %d %lf %lf %lf %lf\n", ncnt, kpt->num_waves,
				kpt->k[0], kpt->k[1], kpt->k[2], CCONST);
		}
		kpt->Gs = igall;
		/* Build a dense (gx, gy, gz) -> wave-index lookup table. */
		int ngx = wf->G_bounds[1] - wf->G_bounds[0] + 1;
		int gxmin = wf->G_bounds[0];
		int ngy = wf->G_bounds[3] - wf->G_bounds[2] + 1;
		int gymin = wf->G_bounds[2];
		int ngz = wf->G_bounds[5] - wf->G_bounds[4] + 1;
		int gzmin = wf->G_bounds[4];
		int* kptinds = (int*) malloc(ngx*ngy*ngz * sizeof(int));
		for (int w = 0; w < ngx*ngy*ngz; w++) kptinds[w] = -1;
		int gx, gy, gz;
		int* gmaps = (int*) malloc(kpt->num_waves * sizeof(int));
		float complex* factors = (float complex*) malloc(
			kpt->num_waves * sizeof(float complex));
		for (int w = 0; w < kpt->num_waves; w++) gmaps[w] = -1;
		for (int w = 0; w < rkpt->num_waves; w++) {
			gx = kpt->Gs[3*w+0];
			gy = kpt->Gs[3*w+1];
			gz = kpt->Gs[3*w+2];
			kptinds[(gx-gxmin)*ngy*ngz + (gy-gymin)*ngz + (gz-gzmin)] = w;
		}
		/* Map each source G-vector through the rotation (and time reversal)
		 * to its slot in the new G list, recording the Bloch phase factor
		 * e^{-i 2 pi (k+G).dr} from the fractional translation dr. */
		double* dr = drs + 3 * (knum%num_kpts);
		double* op = ops+OPSIZE*(knum%num_kpts);
		for (int g = 0; g < kpt->num_waves; g++) {
			//pw[0] = rkpt->k[0] + rkpt->Gs[3*g+0];
			//pw[1] = rkpt->k[1] + rkpt->Gs[3*g+1];
			//pw[2] = rkpt->k[2] + rkpt->Gs[3*g+2];
			pw[0] = rkpt->Gs[3*g+0];
			pw[1] = rkpt->Gs[3*g+1];
			pw[2] = rkpt->Gs[3*g+2];
			rotation_transform(pw, ops+OPSIZE*(knum%num_kpts), pw);
			if (tr == 1) {
				pw[0] *= -1;
				pw[1] *= -1;
				pw[2] *= -1;
			}
			//pw[0] -= kpt->k[0];
			//pw[1] -= kpt->k[1];
			//pw[2] -= kpt->k[2];
			pw[0] += kdiff[0];
			pw[1] += kdiff[1];
			pw[2] += kdiff[2];
			gx = (int) round(pw[0]);
			gy = (int) round(pw[1]);
			gz = (int) round(pw[2]);
			gmaps[kptinds[(gx-gxmin)*ngy*ngz + (gy-gymin)*ngz + (gz-gzmin)]] = g;
			if (tr == 0) {
				factors[kptinds[(gx-gxmin)*ngy*ngz + (gy-gymin)*ngz + (gz-gzmin)]] = cexpf(
					-I * 2 * PI * (dot(kpt->k, dr) + dot(pw, dr)) );
			} else {
				factors[kptinds[(gx-gxmin)*ngy*ngz + (gy-gymin)*ngz + (gz-gzmin)]] = cexpf(
					I * 2 * PI * (dot(kpt->k, dr) + dot(pw, dr)) );
			}
			if (kptinds[(gx-gxmin)*ngy*ngz + (gy-gymin)*ngz + (gz-gzmin)] < 0) {
				printf("ERROR, BAD PLANE WAVE MAPPING %d %d %d %d %d %d %lf %lf %lf\n %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
					rkpt->Gs[3*g+0], rkpt->Gs[3*g+1], rkpt->Gs[3*g+2], gx, gy, gz,
					pw[0], pw[1], pw[2], op[0],op[1],op[2],op[3],op[4],op[5],op[6],op[7],op[8]);
			}
			//printf("OLD G %d %d %d\n", okpt->Gs[3*g+0], okpt->Gs[3*g+1], okpt->Gs[3*g+2]);
			//printf("NEW G %d %d %d\n", kpt->Gs[3*g+0], kpt->Gs[3*g+1], kpt->Gs[3*g+2]);
		}
		/* Copy every band, permuting coefficients through gmaps and applying
		 * the phase factors; conjugate when time reversal was applied. */
		for (int b = 0; b < kpt->num_bands; b++) {
			kpt->bands[b] = (band_t*) malloc(sizeof(band_t));
			kpt->bands[b]->n = rkpt->bands[b]->n;
			kpt->bands[b]->num_waves = rkpt->bands[b]->num_waves;
			kpt->bands[b]->occ = rkpt->bands[b]->occ;
			kpt->bands[b]->energy = rkpt->bands[b]->energy;
			kpt->bands[b]->Cs = (float complex*) malloc(
				kpt->num_waves * sizeof(float complex));
			kpt->bands[b]->CRs = NULL;
			kpt->bands[b]->CAs = NULL;
			kpt->bands[b]->projections = NULL;
			kpt->bands[b]->up_projections = NULL;
			kpt->bands[b]->down_projections = NULL;
			kpt->bands[b]->wave_projections = NULL;
			//double total = 0;
			for (int w = 0; w < kpt->num_waves; w++) {
				if (gmaps[w] < 0) {
					printf("ERROR, INCOMPLETE PLANE WAVE MAPPING\n");
				}
				kpt->bands[b]->Cs[w] = factors[w] * rkpt->bands[b]->Cs[gmaps[w]];
				if (tr == 1) {
					kpt->bands[b]->Cs[w] = conj(kpt->bands[b]->Cs[w]);
				}
				//total += cabs(cabs(kpt->bands[b]->Cs[w]) - cabs(okpt->bands[b]->Cs[w]));
			}
			//printf("energies %lf %lf %e\n", kpt->bands[b]->energy, okpt->bands[b]->energy, total);
		}
		free(gmaps);
		free(kptinds);
		free(factors);
	}
	wf->encut = rwf->encut;
	return wf;
}
/* Abort the program (via ALLOCATION_FAILED) if an allocation returned NULL. */
void CHECK_ALLOCATION(void* ptr) {
    if (!ptr)
        ALLOCATION_FAILED();
}
/* Print a diagnostic and terminate the process; called whenever any
 * allocation in this library returns NULL. */
void ALLOCATION_FAILED(void) {
	printf("ALLOCATION FAILED\n");
	exit(-1);
}
/* Abort with the corresponding MKL DFTI error message if an FFT routine
 * returned a nonzero status code. */
void CHECK_STATUS(int status) {
	if (status != 0) {
		printf("ROUTINE FAILED WITH STATUS %d:\n", status);
		char* message = DftiErrorMessage(status);
		printf("%s\n", message);
		exit(-1);
	}
}
/* Return the smooth radial grid of pseudopotential `num` in the list.
 * NOTE(review): despite the name this returns the grid, not a wave — the
 * commented-out line shows the earlier behavior; confirm callers expect
 * the grid. */
double* get_smooth_wave(ppot_t* lst, int num) {
	//return lst[num].funcs[0].smooth_diffwave;
	return lst[num].smooth_grid;
}
|
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * *y is normalized in place while carrying whole seconds so that the
 * microsecond difference ends up non-negative.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Shed excess whole seconds accumulated in the microsecond field. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: allocates two time-planes A[2] and seven coefficient
 * arrays, fills them with pseudo-random data, runs the order-1 3D 7-point
 * variable-coefficient stencil for Nt-1 steps, and reports per-run and
 * best wall-clock times over TESTS repetitions.
 * Usage: prog Nx Ny Nz [Nt]
 * Fixes vs. previous version:
 *  - Nx/Ny/Nz/Nt were read uninitialized (UB) when too few command-line
 *    arguments were given; sensible defaults are now provided.
 *  - min_tdiff used an undefined lowercase min(); the MIN macro defined
 *    in this file is used instead.
 *  - Initialization loops started at 1, leaving the plane at index 0
 *    uninitialized even though the stencil reads it as halo; they now
 *    start at 0. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults (32^3 interior + halo, 10 steps), overridden by argv below. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A[2][Nz][Ny][Nx] and coef[7][Nz][Ny][Nx]
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize ALL grid points with reproducible pseudo-random data;
  // index 0 is read by the stencil as halo, so it must be initialized too
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* num_threads may be consumed by the PRINT_RESULTS macro. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Was min(...) — undefined in this file; MIN is the macro above. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gnu, intel-16.0
#include "callback.h"
#include <omp.h>
// Spawns a two-thread parallel region in which the master thread runs a
// taskloop of 16 iterations split into grainsize-4 chunks (4 tasks); the
// CHECK lines below verify the OMPT taskloop and chunk-dispatch callbacks.
int main() {
  unsigned int i, x;
#pragma omp parallel num_threads(2)
  {
#pragma omp barrier
#pragma omp master
#pragma omp taskloop grainsize(4)
    for (i = 0; i < 16; i++) {
      // Make every iteration take at least 1 ms
      delay(1000);
    }
  }
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
  // CHECK-SAME: parent_task_id={{[0-9]+}}
  // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]]
  // CHECK-SAME: requested_team_size=2
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID1:[0-9]+]]
  // CHECK-SAME: team_size=2, thread_num=0
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskloop_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]], count=16
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // CHECK-SAME: new_task_id=[[TASK_ID0:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // CHECK-SAME: new_task_id=[[TASK_ID1:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // CHECK-SAME: new_task_id=[[TASK_ID2:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // CHECK-SAME: new_task_id=[[TASK_ID3:[0-9]+]]
  // CHECK-DAG: {{.*}}: ompt_event_taskloop_chunk_begin:{{.*}}task_id=[[TASK_ID0]]{{.*}}chunk_iterations=4
  // CHECK-DAG: {{.*}}: ompt_event_taskloop_chunk_begin:{{.*}}task_id=[[TASK_ID1]]{{.*}}chunk_iterations=4
  // CHECK-DAG: {{.*}}: ompt_event_taskloop_chunk_begin:{{.*}}task_id=[[TASK_ID2]]{{.*}}chunk_iterations=4
  // CHECK-DAG: {{.*}}: ompt_event_taskloop_chunk_begin:{{.*}}task_id=[[TASK_ID3]]{{.*}}chunk_iterations=4
  return 0;
}
|
stencil3d_mdev.c.c | //
// Created by Yonghong Yan on 3/8/16.
//
#if 0
void stencil3d_omp_mdev(long n, long m, long k, REAL *u, int radius, REAL *coeff, int num_its) {
long it; /* iteration */
long u_dimX = n + 2 * radius;
long u_dimY = m + 2 * radius;
long u_dimZ = k + 2 * radius;
int coeff_dimX = 2 * radius + 1;
coeff = coeff + (2 * radius + 1) * radius + radius; /* let coeff point to the center element */
int count = 4*radius+1;
#ifdef SQUARE_SETNCIL
count = coeff_dimX * coeff_dimX * coeff_dimX;
#endif
/* uold should be simpliy allocated on the dev and then copy data from u, here we simplified the initialization */
REAL *uold = (REAL *) malloc(sizeof(REAL) * u_dimX * u_dimY * u_dimZ);
memcpy(uold, u, sizeof(REAL)*u_dimX * u_dimY * u_dimZ);
#pragma omp target data device(*) map(to:n, m, u_dimX, u_dimY, radius, coeff_center, coeff[coeff_dimX][coeff_dimX]) \
map(tofrom:u[u_dimX][u_dimY] dist_data(BLOCK,DUPLICATE) halo(radius,)) map(to:uold[u_dimX][u_dimY] dist_data(BLOCK,DUPLICATE) halo(radius,))
#pragma omp parallel shared(n, m, radius, coeff, num_its, u_dimX, u_dimY, coeff_dimX) private(it) firstprivate(u, uold) //num_threads(/* num of devices + number of cores */)
{
int ix, iy, iz, ir;
/*
#pragma omp target device(*) dist_iteration(BLOCK)
#pragma omp for
for (ix = 0; ix < u_dimX; ix++) {
for (iy = 0; iy < u_dimY; iy++) {
uold[ix * u_dimY + iy] = u[ix * u_dimY + iy];
}
}
*/
for (it = 0; it < num_its; it++) {
#pragma omp target device(*) dist_iteration(BLOCK)
#pragma omp for
for (ix = 0; ix < n; ix++) {
REAL *temp_u = &u[(ix + radius) * u_dimY+radius];
REAL *temp_uold = &uold[(ix + radius) * u_dimY+radius];
for (iy = 0; iy < m; iy++) {
for (iz = 0; iz < k; iz++) {
REAL result = temp_uold[0] * coeff[0];
/* 2/4 way loop unrolling */
for (ir = 1; ir <= radius; ir++) {
result += coeff[ir] * temp_uold[ir]; //horizontal right
result += coeff[-ir] * temp_uold[-ir]; // horizontal left
result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimY]; //vertical up
result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimY]; // vertical bottom
result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimZ]; //vertical up - Z
result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimZ]; // vertical bottom - Z
#ifdef SQUARE_SETNCIL
result += coeff[-ir*coeff_dimX-ir] * temp_uold[-ir * u_dimY-ir] // left upper corner
result += coeff[-ir*coeff_dimX+ir] * temp_uold[-ir * u_dimY+ir] // right upper corner
result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimY]-ir] // left bottom corner
result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimY]+ir] // right bottom corner
result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimZ]-ir] // left bottom corner - Z
result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimZ]+ir] // right bottom corner - Z
#endif
}
*temp_u = result/count;
temp_u++;
temp_uold++;
}//z end
}//y end
}
#pragma omp halo_exchange(u);
REAL *tmp = uold;
uold = u;
u = tmp;
} /* End iteration loop */
}
free(uold);
}
#endif
/**
 * Offload the order-`radius` 3D stencil on `u` to all active devices.
 *
 * Sets up a device topology, data-map descriptors for u / uold / coeff, and
 * per-dimension distribution policies chosen by the globals `dist_dim` and
 * `dist_policy`, then runs the copy-in, `num_runs` kernel launches, and the
 * copy-back, timing each phase.
 *
 * @param n,m,k    interior grid sizes in X/Y/Z
 * @param u        padded grid of (n+2r)*(m+2r)*(k+2r) REALs, updated in place
 * @param radius   stencil order (ghost-cell width on each side)
 * @param coeff    coefficient array of (2r+1)^3 REALs
 * @param num_its  stencil iterations per kernel launch (passed to the launcher)
 * @return total offloading time in ms (init + copy-to + kernel + copy-from)
 */
double stencil3d_omp_mdev(long n, long m, long k, REAL *u, int radius, REAL *coeff, int num_its) {
    /* Padded extents: "radius" ghost cells on each side of every dimension. */
    long u_dimX = n + 2 * radius;
    long u_dimY = m + 2 * radius;
    long u_dimZ = k + 2 * radius;
    int coeff_dimX = 2*radius+1;//NOTE Check the correctness
    REAL * coeff_center = coeff + (2*radius+1) * radius + radius; /* let coeff point to the center element */
    /* uold double-buffers the grid; seeded with a copy of u. */
    REAL *uold = (REAL *) omp_unified_malloc(sizeof(REAL) * u_dimX * u_dimY* u_dimZ);
    memcpy(uold, u, sizeof(REAL)*u_dimX * u_dimY* u_dimZ);
    //print_array("Before offloading", "u", u, u_dimX, u_dimY);
    double off_init_time = read_timer_ms();
    int __top_ndims__;
    /**************************************** dist-specific *****************************************/
    if (dist_dim == 1 || dist_dim == 2) __top_ndims__ = 1;
    else /* dist == 3 */__top_ndims__ = 2;
    /************************************************************************************************/
    /* use all the devices */
    int __num_targets__ = omp_get_num_active_devices(); /*XXX: = runtime or compiler generated code */
    omp_grid_topology_t * __top__ = omp_grid_topology_init_simple(__num_targets__, __top_ndims__);
    /* init other infos (dims, periodic, idmaps) of top if needed */
    int __num_maps__ = 3; /* u, uold and the coeff */ /* XXX: need compiler output */
    /* data copy offloading */
    omp_offloading_info_t *__copy_data_off_info__ =
            omp_offloading_init_info("data copy", __top__, 1, OMP_OFFLOADING_DATA, __num_maps__, NULL, NULL, 0);
    /* stencil kernel offloading */
    struct stencil3d_off_args off_args;
    off_args.n = n; off_args.m = m; off_args.u = u; off_args.radius = radius; off_args.coeff = coeff; off_args.num_its = num_its;
    off_args.uold = uold; off_args.coeff_center = coeff_center; off_args.coeff_dimX = coeff_dimX; off_args.u_dimX = u_dimX; off_args.u_dimY = u_dimY; off_args.u_dimZ = u_dimZ;
    omp_offloading_info_t * __off_info__ =
            omp_offloading_init_info("stencil3d kernel", __top__, 1, OMP_OFFLOADING_CODE, 0,
                                     stencil3d_omp_mdev_off_launcher, &off_args, 1);
    omp_offloading_append_profile_per_iteration(__off_info__, 13*u_dimY*u_dimZ, 7, 1);//NOTE: how to handle this for z?
    //printf("data copy off: %X, stencil3d off: %X\n", __copy_data_off_info__, __off_info__);
    /* u map info (slot 0 of the __num_maps__ == 3 map entries) */
    omp_data_map_info_t *__u_map_info__ = &__copy_data_off_info__->data_map_info[0];
    /* NOTE(review): num_dims is passed as 2 here although set_dims_3d is used
     * below -- confirm against the runtime's expectations. */
    omp_data_map_init_info("u", __u_map_info__, __copy_data_off_info__, u, 2, sizeof(REAL), OMP_DATA_MAP_TOFROM, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_3d(__u_map_info__, u_dimX, u_dimY, u_dimZ);
    /* uold map info (slot 1) */
    omp_data_map_info_t *__uold_map_info__ = &__copy_data_off_info__->data_map_info[1];
    omp_data_map_init_info("uold", __uold_map_info__, __copy_data_off_info__, uold, 2, sizeof(REAL), OMP_DATA_MAP_TO, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_3d(__uold_map_info__, u_dimX, u_dimY, u_dimZ);
    /* coeff map info: slot 2 is the last of the __num_maps__ == 3 entries
     * (u = 0, uold = 1, coeff = 2). The previous code indexed slot 3, one
     * past the number of maps handed to omp_offloading_init_info above. */
    omp_data_map_info_t *__coeff_map_info__ = &__copy_data_off_info__->data_map_info[2];
    omp_data_map_init_info("coeff", __coeff_map_info__, __copy_data_off_info__, coeff, 2, sizeof(REAL), OMP_DATA_MAP_TO, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_3d(__coeff_map_info__, coeff_dimX, coeff_dimX, coeff_dimX);
    /* The coefficient block is small and read-only: duplicate it everywhere. */
    omp_data_map_dist_init_info(__coeff_map_info__, 0, OMP_DIST_POLICY_DUPLICATE, 0, coeff_dimX, 0);
    omp_data_map_dist_init_info(__coeff_map_info__, 1, OMP_DIST_POLICY_DUPLICATE, 0, coeff_dimX, 0);
    omp_data_map_dist_init_info(__coeff_map_info__, 2, OMP_DIST_POLICY_DUPLICATE, 0, coeff_dimX, 0);//added this
    /**************************************** dist-specific *****************************************/
    if (dist_dim == 1) {
        if (dist_policy == 1) { /* BLOCK_BLOCK */
            omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, radius, n, 0);
            omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0);
            //printf("BLOCK dist policy for arrays and loop dist\n");
        } else if (dist_policy == 2) { /* BLOCK_ALIGN */
            omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, radius, n, 0);
            omp_loop_dist_align_with_data_map(__off_info__, 0, 0, __u_map_info__, 0);
            //printf("BLOCK dist policy for arrays, and loop dist align with array A row dist\n");
        } else if (dist_policy == 3) { /* AUTO_ALIGN */
            omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_AUTO, 0, n, 0);
            omp_data_map_dist_align_with_loop(__u_map_info__, 0, radius, __off_info__, 0);
            //printf("AUTO dist policy for loop dist and array align with loops\n");
        }
        omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_DUPLICATE, 0, u_dimY, 0);
        omp_map_add_halo_region(__u_map_info__, 0, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, OMP_ALIGNEE_START, __u_map_info__, OMP_ALL_DIMENSIONS);
        omp_data_map_dist_init_info(__u_map_info__, 2, OMP_DIST_POLICY_DUPLICATE, 0, u_dimZ, 0); //added this part of code
        /* NOTE(review): the two calls below repeat the dim-0 halo setup and the
         * uold alignment verbatim; if a halo was intended for the dim-2
         * distribution just added, this should probably say dim 2 -- confirm
         * and deduplicate. Left unchanged to preserve runtime behavior. */
        omp_map_add_halo_region(__u_map_info__, 0, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, OMP_ALIGNEE_START, __u_map_info__, OMP_ALL_DIMENSIONS);
    } else if (dist_dim == 2) {
        omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_DUPLICATE, radius, n, 0);
        omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_BLOCK, radius, n, 0);
        omp_data_map_dist_init_info(__u_map_info__, 2, OMP_DIST_POLICY_BLOCK, radius, n, 0);//added
        omp_map_add_halo_region(__u_map_info__, 0, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, 0, __u_map_info__, OMP_ALL_DIMENSIONS);
        omp_loop_dist_init_info(__off_info__, 1, OMP_DIST_POLICY_BLOCK, 0, m, 0);
        omp_loop_dist_init_info(__off_info__, 2, OMP_DIST_POLICY_BLOCK, 0, k, 0);//added
    } else /* dist == 3 */{
        omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, radius, n, 0);
        omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_BLOCK, radius, n, 1);
        omp_data_map_dist_init_info(__u_map_info__, 2, OMP_DIST_POLICY_BLOCK, radius, n, 1);
        omp_map_add_halo_region(__u_map_info__, 0, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_map_add_halo_region(__u_map_info__, 1, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, 0, __u_map_info__, OMP_ALL_DIMENSIONS);
        omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0);
        omp_loop_dist_init_info(__off_info__, 1, OMP_DIST_POLICY_BLOCK, 0, m, 1);
        omp_loop_dist_init_info(__off_info__, 2, OMP_DIST_POLICY_BLOCK, 0, k, 1);
    }
    /************************************************************************************************/
    off_init_time = read_timer_ms() - off_init_time;
    /*********** NOW notifying helper thread to work on this offload ******************/
#if DEBUG_MSG
    /* Fixed: this previously printed __num_target_devices__, which is not
     * declared in this function; __num_targets__ is the variable in scope. */
    printf("=========================================== offloading to %d targets ==========================================\n", __num_targets__);
#endif
    double off_copyto_time = read_timer_ms();
    double start_time = off_copyto_time;
    omp_offloading_start(__copy_data_off_info__, 0);
    omp_print_map_info(__u_map_info__);
    omp_print_map_info(__uold_map_info__);
    omp_print_map_info(__coeff_map_info__);
    off_copyto_time = read_timer_ms() - off_copyto_time;
    // printf("offloading from stencil now\n");
    double off_kernel_time = read_timer_ms();
    int it;
    /* Launch the kernel num_runs times; the final launch flags completion. */
    for (it=0; it< num_runs; it++) omp_offloading_start(__off_info__, it== num_runs -1);
    off_kernel_time = (read_timer_ms() - off_kernel_time)/ num_runs;
    /* copy back u from each device and free others */
    double off_copyfrom_time = read_timer_ms();
    omp_offloading_start(__copy_data_off_info__, 1);
    off_copyfrom_time = read_timer_ms() - off_copyfrom_time;
    double off_total = off_init_time + off_copyto_time + off_copyfrom_time + off_kernel_time;
#if defined (OMP_BREAKDOWN_TIMING)
    omp_offloading_info_report_profile(__copy_data_off_info__);
    omp_offloading_info_report_profile(__off_info__);
    omp_offloading_info_t *infos[2];
    infos[0] = __copy_data_off_info__;
    infos[1] = __off_info__;
    omp_offloading_info_sum_profile(infos, 2, start_time, start_time+off_total);
    omp_offloading_info_report_profile(__copy_data_off_info__);
#endif
    omp_offloading_fini_info(__copy_data_off_info__);
    omp_offloading_fini_info(__off_info__);
    omp_grid_topology_fini(__top__);
    omp_unified_free(uold);
    return off_total;
}
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract timeval Y from X, writing X - Y into RESULT.
 * Y is normalized in place so its tv_usec lines up with X's.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y until x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += borrow;
    y->tv_usec -= 1000000 * borrow;
  }
  /* Carry any surplus beyond one second out of the microsecond gap. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= carry;
    y->tv_usec += 1000000 * carry;
  }
  /* After normalization the component-wise difference is well-formed
   * and tv_usec of the result is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1 3D 7-point variable-coefficient stencil benchmark.
 * Usage: prog Nx Ny Nz Nt (interior grid sizes and number of time steps). */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* All four size arguments are required: previously Nx..Nt stayed
   * uninitialized (undefined behavior) when some were missing. */
  if (argc > 4) {
    Nx = atoi(argv[1])+2;  /* +2: one boundary layer on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
    Nt = atoi(argv[4]);
  } else {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  // allocate the arrays: A[2][Nz][Ny][Nx] double-buffers the grid over time
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // coef[c] holds the per-point coefficient of each of the 7 stencil taps
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 1024;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables with a fixed seed for reproducible timings
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  /* num_threads is reported by the PRINT_RESULTS macro below. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
                                  coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
                                  coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
                                  coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
                                  coef[6][i][j][k] * A[t%2][i ][j ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Use the MIN macro defined at the top of this file; the lowercase
     * "min" used before is not defined in this translation unit. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays, including the top-level pointers and the tile
  // list (all three previously leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
for_nowait_scheduling.c | // RUN: %libomp-compile && env KMP_ABT_NUM_ESS=4 %libomp-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include "bolt_scheduling_util.h"
// Scheduling test for the BOLT ("abt") runtime with 4 execution streams:
// threads that leave the nowait worksharing loop early must still be
// scheduled so they can join the timeout barrier, otherwise the barrier
// times out and the test fails. Returns 1 on success, 0 on failure.
int test_for_nowait_scheduling() {
  int i, vals[4];
  memset(vals, 0, sizeof(int) * 4);
  timeout_barrier_t barrier;
  timeout_barrier_init(&barrier);
#pragma omp parallel num_threads(4)
  {
    check_num_ess(4);
    int tid = omp_get_thread_num();
#pragma omp for nowait
    for (i = 0; i < 4; i++) {
      // Threads 0 and 1 block inside their loop chunks until all four
      // parties arrive at the barrier.
      if (tid < 2) {
        timeout_barrier_wait(&barrier, 4);
      }
    }
    if (tid >= 2) {
      // The following barrier must be synchronized with the "for" above.
      timeout_barrier_wait(&barrier, 4);
    }
    // Each thread records that it made it through the barrier dance.
    vals[omp_get_thread_num()] = 1;
  }
  // Succeed only if all four threads completed the parallel region.
  for (i = 0; i < 4; i++) {
    if (vals[i] != 1) {
      printf("vals[%d] == %d\n", i, vals[i]);
      return 0;
    }
  }
  return 1;
}
// Run the test REPETITIONS times; the exit status is the number of failed
// runs (0 == success), matching the libomp test-suite convention.
int main() {
  int i, num_failed = 0;
  // Start at 0 so the test really executes REPETITIONS times: the original
  // loop started at 1 and silently dropped one repetition. Also drop the
  // stray argument -- test_for_nowait_scheduling() takes no parameters.
  for (i = 0; i < REPETITIONS; i++) {
    if (!test_for_nowait_scheduling()) {
      num_failed++;
    }
  }
  return num_failed;
}
|
kernel_cpu.c | #ifdef __cplusplus
extern "C" {
#endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
#include <omp.h> // (in path known to compiler) needed by openmp
#include <stdlib.h> // (in path known to compiler) needed by malloc
#include <stdio.h> // (in path known to compiler) needed by printf
#include <math.h> // (in path known to compiler) needed by exp
//======================================================================================================================================================150
// MAIN FUNCTION HEADER
//======================================================================================================================================================150
#include "./../main.h" // (in the main program folder) needed to recognized input variables
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL_CPU FUNCTION HEADER
//======================================================================================================================================================150
#include "kernel_cpu.h" // (in the current directory)
//========================================================================================================================================================================================================200
// PLASMAKERNEL_GPU
//========================================================================================================================================================================================================200
/**
 * kernel_cpu -- OpenMP particle-interaction kernel.
 * For every box, accumulates pairwise potential/force contributions on its
 * particles from the particles of the box itself and of each neighbor box.
 *
 * @param par  global parameters (alpha is read)
 * @param dim  problem dimensions (cores_arg, number_boxes are read)
 * @param box  box descriptors: offsets into rv/qv/fv plus neighbor lists
 * @param rv   input per-particle position vectors
 * @param qv   input per-particle charges
 * @param fv   output per-particle accumulated force/potential (updated)
 */
void kernel_cpu( par_str par,
                 dim_str dim,
                 box_str* box,
                 FOUR_VECTOR* rv,
                 fp* qv,
                 FOUR_VECTOR* fv)
{
  //======================================================================================================================================================150
  // Variables
  //======================================================================================================================================================150
  // timer
  long long time0;
  time0 = get_time();
  // timer
  long long time1;
  long long time2;
  long long time3;
  long long time4;
  // parameters
  fp alpha;
  fp a2;
  // counters
  int i, j, k, l;
  // home box
  long first_i;
  FOUR_VECTOR* rA;
  FOUR_VECTOR* fA;
  // neighbor box
  int pointer;
  long first_j;
  FOUR_VECTOR* rB;
  fp* qB;
  // common
  fp r2;
  fp u2;
  fp fs;
  fp vij;
  fp fxij,fyij,fzij;
  THREE_VECTOR d;
  time1 = get_time();
  //======================================================================================================================================================150
  // MCPU SETUP
  //======================================================================================================================================================150
  omp_set_num_threads(dim.cores_arg);
  time2 = get_time();
  //======================================================================================================================================================150
  // INPUTS
  //======================================================================================================================================================150
  alpha = par.alpha;
  a2 = 2.0*alpha*alpha;
  time3 = get_time();
  //======================================================================================================================================================150
  // PROCESS INTERACTIONS
  //======================================================================================================================================================150
  // Boxes are independent, so the outer box loop parallelizes cleanly;
  // everything written inside an iteration is private to it.
  #pragma omp parallel for \
      private(i, j, k) \
      private(first_i, rA, fA) \
      private(pointer, first_j, rB, qB) \
      private(r2, u2, fs, vij, fxij, fyij, fzij, d)
  for(l=0; l<dim.number_boxes; l=l+1){
    //------------------------------------------------------------------------------------------100
    // home box - box parameters
    //------------------------------------------------------------------------------------------100
    first_i = box[l].offset; // offset to common arrays
    //------------------------------------------------------------------------------------------100
    // home box - distance, force, charge and type parameters from common arrays
    //------------------------------------------------------------------------------------------100
    rA = &rv[first_i];
    fA = &fv[first_i];
    //------------------------------------------------------------------------------------------100
    // Do for the # of (home+neighbor) boxes
    //------------------------------------------------------------------------------------------100
    for (k=0; k<(1+box[l].nn); k++)
    {
      //----------------------------------------50
      // neighbor box - get pointer to the right box
      //----------------------------------------50
      if(k==0){
        pointer = l; // set first box to be processed to home box
      }
      else{
        pointer = box[l].nei[k-1].number; // remaining boxes are neighbor boxes
      }
      //----------------------------------------50
      // neighbor box - box parameters
      //----------------------------------------50
      first_j = box[pointer].offset;
      //----------------------------------------50
      // neighbor box - distance, force, charge and type parameters
      //----------------------------------------50
      rB = &rv[first_j];
      qB = &qv[first_j];
      //----------------------------------------50
      // Do for the # of particles in home box
      //----------------------------------------50
      for (i=0; i<NUMBER_PAR_PER_BOX; i=i+1){
        // do for the # of particles in current (home or neighbor) box
        for (j=0; j<NUMBER_PAR_PER_BOX; j=j+1){
          // coefficients
          r2 = rA[i].v + rB[j].v - DOT(rA[i],rB[j]);
          u2 = a2*r2;
          vij= exp(-u2);
          fs = 2.*vij;
          d.x = rA[i].x - rB[j].x;
          d.y = rA[i].y - rB[j].y;
          d.z = rA[i].z - rB[j].z;
          fxij=fs*d.x;
          fyij=fs*d.y;
          fzij=fs*d.z;
          // forces
          fA[i].v += qB[j]*vij;
          fA[i].x += qB[j]*fxij;
          fA[i].y += qB[j]*fyij;
          fA[i].z += qB[j]*fzij;
        } // for j
      } // for i
    } // for k
  } // for l
  time4 = get_time();
  //======================================================================================================================================================150
  // DISPLAY TIMING
  //======================================================================================================================================================150
  // Fixed: the format strings previously used a bare "%" followed by
  // space/":" -- an invalid conversion specification (undefined behavior);
  // a literal percent sign must be written as "%%".
  printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
  printf("%15.12f s, %15.12f %% : CPU/MCPU: VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time4-time0) * 100);
  printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time4-time0) * 100);
  printf("%15.12f s, %15.12f %% : CPU/MCPU: INPUTS\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time4-time0) * 100);
  printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time4-time0) * 100);
  printf("Total time:\n");
  printf("%.12f s\n", (float) (time4-time0) / 1000000);
} // main
#ifdef __cplusplus
}
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values. Y is normalized
 * in place so the microsecond fields subtract cleanly.
 * Returns 1 when X < Y (negative difference), 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Step 1: normalize y so that x->tv_usec - y->tv_usec is non-negative. */
  if (y->tv_usec > x->tv_usec) {
    int sec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= sec * 1000000;
    y->tv_sec += sec;
  }
  /* Step 2: fold any surplus beyond one second back into whole seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int sec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += sec * 1000000;
    y->tv_sec -= sec;
  }
  /* Step 3: the component-wise difference is now well-formed and its
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* A negative total shows up as x's seconds trailing y's. */
  return x->tv_sec < y->tv_sec;
}
/* Driver for the PLUTO/CLooG time-tiled version of the order-1 3D 7-point
 * variable-coefficient stencil. Usage: prog Nx Ny Nz Nt. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* All four size arguments are required: previously Nx..Nt stayed
   * uninitialized (undefined behavior) when some were missing. */
  if (argc > 4) {
    Nx = atoi(argv[1])+2;  /* +2: one boundary layer on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
    Nt = atoi(argv[4]);
  } else {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  // allocate the arrays: A[2][Nz][Ny][Nx] double-buffers the grid over time
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // coef[c] holds the per-point coefficient of each of the 7 stencil taps
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 16;
  tile_size[3] = 1024;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables with a fixed seed for reproducible timings
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  /* num_threads is reported by the PRINT_RESULTS macro below. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* The loop nest below is the PLUTO/CLooG-generated time-tiled schedule
     * of the plain 4-deep stencil loop (tile sizes 24x24x16x1024); it is
     * machine-generated -- do not hand-edit the bounds. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,12);t1++) {
    lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
    ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(3*t1-3,4)),ceild(24*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(12*t1+Ny+21,16)),floord(24*t2+Ny+20,16)),floord(24*t1-24*t2+Nz+Ny+19,16));t3++) {
        for (t4=max(max(max(0,ceild(3*t1-255,256)),ceild(24*t2-Nz-1020,1024)),ceild(16*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(12*t1+Nx+21,1024)),floord(24*t2+Nx+20,1024)),floord(16*t3+Nx+12,1024)),floord(24*t1-24*t2+Nz+Nx+19,1024));t4++) {
          for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),16*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),16*t3+14),1024*t4+1022),24*t1-24*t2+Nz+21);t5++) {
            for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* "min" here is the lowercase macro defined at the top of this file. */
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays, including the top-level pointers and the tile
  // list (all three previously leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
convolution_7x7.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// 7x7 convolution, stride 1, NEON-optimized.
// Weight layout: 49 floats per (output channel p, input channel q) pair
// (see `kernel + p*inch*49 + q*49` below), stored as seven rows of 7.
// top_blob must be pre-sized by the caller; each output channel is filled
// with its bias and then accumulated over all input channels.
// NOTE(review): the per-row input advance of 6 at the bottom presumes
// w == outw + 6 (valid convolution, no padding) -- confirm against caller.
static void conv7x7s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
// one loop iteration (thread) per output channel
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*49 + q*49;
// r0..r6: the seven consecutive input rows covered by the 7x7 window
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
// k0..k6: the seven kernel rows (7 weights each)
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
// vector path handles 4 output pixels per iteration; 'remain' is the scalar tail
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// aarch64 intrinsics path: for each of the 7 input rows, load 12
// consecutive pixels, build the 7 shifted views with vext, and fuse
// multiply-accumulate against that kernel row's weights.
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3
float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7
float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11
float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4
float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5
float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6
float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8
float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r14 = vld1q_f32(r1 + 4);
float32x4_t _r10n = vld1q_f32(r1 + 8);
float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
float32x4_t _r15 = vextq_f32(_r14, _r10n, 1);
float32x4_t _r16 = vextq_f32(_r14, _r10n, 2);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r24 = vld1q_f32(r2 + 4);
float32x4_t _r20n = vld1q_f32(r2 + 8);
float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
float32x4_t _r25 = vextq_f32(_r24, _r20n, 1);
float32x4_t _r26 = vextq_f32(_r24, _r20n, 2);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r34 = vld1q_f32(r3 + 4);
float32x4_t _r30n = vld1q_f32(r3 + 8);
float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
float32x4_t _r35 = vextq_f32(_r34, _r30n, 1);
float32x4_t _r36 = vextq_f32(_r34, _r30n, 2);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r44 = vld1q_f32(r4 + 4);
float32x4_t _r40n = vld1q_f32(r4 + 8);
float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
float32x4_t _r45 = vextq_f32(_r44, _r40n, 1);
float32x4_t _r46 = vextq_f32(_r44, _r40n, 2);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r54 = vld1q_f32(r5 + 4);
float32x4_t _r50n = vld1q_f32(r5 + 8);
float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
float32x4_t _r53 = vextq_f32(_r50, _r54, 3);
float32x4_t _r55 = vextq_f32(_r54, _r50n, 1);
float32x4_t _r56 = vextq_f32(_r54, _r50n, 2);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
float32x4_t _r60 = vld1q_f32(r6);
float32x4_t _r64 = vld1q_f32(r6 + 4);
float32x4_t _r60n = vld1q_f32(r6 + 8);
float32x4_t _r61 = vextq_f32(_r60, _r64, 1);
float32x4_t _r62 = vextq_f32(_r60, _r64, 2);
float32x4_t _r63 = vextq_f32(_r60, _r64, 3);
float32x4_t _r65 = vextq_f32(_r64, _r60n, 1);
float32x4_t _r66 = vextq_f32(_r64, _r60n, 2);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
// stride 1: advance all seven row pointers by the 4 pixels just produced
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
r6 += 4;
outptr += 4;
}
#else
// armv7 path: hand-scheduled inline assembly; same math as the aarch64
// intrinsics above but with four partial accumulators (q12-q15) that are
// summed at the end. %9 (k0) is advanced by 28 bytes per kernel row and
// restored with "sub %9, #168" before the store.
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d25}, [%1] \n"// _sum
// "veor q13, q13 \n"// _sum2 = 0;
// "veor q14, q14 \n"// _sum3 = 0;
// "veor q15, q15 \n"// _sum4 = 0;
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
"add %9, #28 \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"// q0 = 0 1 2 3
"vmla.f32 q12, q0, d8[0] \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2] \n"// q2 = 4 5 6 7 q3 = 8 9 10 11
"vmul.f32 q13, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"// q1 = 1 2 3 4
"vext.32 q10, q2, q3, #1 \n"// q10= 5 6 7 8
"vmul.f32 q14, q1, d8[1] \n"
"vmul.f32 q15, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"// q8 = 2 3 4 5
"vext.32 q11, q2, q3, #2 \n"// q11= 6 7 8 9
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"// q9 = 3 4 5 6
"vmla.f32 q14, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
"add %9, #28 \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d4-d7}, [%3] \n"
"vmla.f32 q12, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q13, q1, d12[1] \n"
"vmla.f32 q14, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q13, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
"add %9, #28 \n"
"pld [%4, #128] \n"
"vld1.f32 {d0-d1}, [%4]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4] \n"
"vmla.f32 q15, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q12, q1, d8[1] \n"
"vmla.f32 q13, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q12, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
"add %9, #28 \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5]! \n"
"vmla.f32 q13, q0, d12[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5] \n"
"vmla.f32 q14, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q15, q1, d12[1] \n"
"vmla.f32 q12, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
"add %9, #28 \n"
"pld [%6, #128] \n"
"vld1.f32 {d0-d1}, [%6]! \n"
"vmla.f32 q12, q0, d8[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6] \n"
"vmla.f32 q13, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q14, q1, d8[1] \n"
"vmla.f32 q15, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q14, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
"add %9, #28 \n"
"pld [%7, #128] \n"
"vld1.f32 {d0-d1}, [%7]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7] \n"
"vmla.f32 q12, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q13, q1, d12[1] \n"
"vmla.f32 q14, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q13, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
"sub %9, #168 \n"// restore k0
"pld [%8, #128] \n"
"vld1.f32 {d0-d1}, [%8]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"pld [%8, #256] \n"
"vld1.f32 {d4-d7}, [%8] \n"
"vmla.f32 q15, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q12, q1, d8[1] \n"
"vmla.f32 q13, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vadd.f32 q13, q13, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"vst1.f32 {d24-d25}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(k0) // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// scalar tail: one output pixel per iteration (also the whole row
// when NEON is unavailable)
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
// accumulate on top of bias / previous input channels
*outptr += sum;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
outptr++;
}
// skip the 6-pixel window overhang to reach the start of the next row
// (valid for w == outw + 6)
r0 += 6;
r1 += 6;
r2 += 6;
r3 += 6;
r4 += 6;
r5 += 6;
r6 += 6;
}
}
}
}
// 7x7 convolution, stride 2, NEON-optimized.
// Same weight layout as conv7x7s1_neon (49 floats per channel pair, seven
// rows of 7). Because of the stride, the vector path deinterleaves each
// input row into even/odd column lanes with vld2 so that four stride-2
// windows can be processed at once.
static void conv7x7s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// bytes-free pointer math: after a row consumes 2*outw input pixels,
// advancing by tailstep (= 2*w - 2*outw) lands on the next row pair
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
// one loop iteration (thread) per output channel
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*49 + q*49;
// r0..r6: the seven consecutive input rows covered by the 7x7 window
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
// k0..k6: the seven kernel rows (7 weights each)
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
// vector path handles 4 output pixels per iteration; 'remain' is the scalar tail
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// aarch64 intrinsics path: vld2 splits 16 pixels into even columns
// (val[0]) and odd columns (val[1]); vext then builds the seven
// stride-2 shifted views needed for the 7-tap row.
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4x2_t _r00_02461357 = vld2q_f32(r0);
float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10
float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11
float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4x2_t _r10_02461357 = vld2q_f32(r1);
float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
float32x4_t _r1_8101214 = _r10nx2.val[0];
float32x4_t _r1_9111315 = _r10nx2.val[1];
float32x4_t _r10 = _r10_02461357.val[0];
float32x4_t _r11 = _r10_02461357.val[1];
float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);
float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2);
float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4x2_t _r20_02461357 = vld2q_f32(r2);
float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
float32x4_t _r2_8101214 = _r20nx2.val[0];
float32x4_t _r2_9111315 = _r20nx2.val[1];
float32x4_t _r20 = _r20_02461357.val[0];
float32x4_t _r21 = _r20_02461357.val[1];
float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);
float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2);
float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4x2_t _r30_02461357 = vld2q_f32(r3);
float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
float32x4_t _r3_8101214 = _r30nx2.val[0];
float32x4_t _r3_9111315 = _r30nx2.val[1];
float32x4_t _r30 = _r30_02461357.val[0];
float32x4_t _r31 = _r30_02461357.val[1];
float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);
float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2);
float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4x2_t _r40_02461357 = vld2q_f32(r4);
float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
float32x4_t _r4_8101214 = _r40nx2.val[0];
float32x4_t _r4_9111315 = _r40nx2.val[1];
float32x4_t _r40 = _r40_02461357.val[0];
float32x4_t _r41 = _r40_02461357.val[1];
float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);
float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2);
float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4x2_t _r50_02461357 = vld2q_f32(r5);
float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8);
float32x4_t _r5_8101214 = _r50nx2.val[0];
float32x4_t _r5_9111315 = _r50nx2.val[1];
float32x4_t _r50 = _r50_02461357.val[0];
float32x4_t _r51 = _r50_02461357.val[1];
float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1);
float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1);
float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2);
float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2);
float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
float32x4x2_t _r60_02461357 = vld2q_f32(r6);
float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8);
float32x4_t _r6_8101214 = _r60nx2.val[0];
float32x4_t _r6_9111315 = _r60nx2.val[1];
float32x4_t _r60 = _r60_02461357.val[0];
float32x4_t _r61 = _r60_02461357.val[1];
float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1);
float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1);
float32x4_t _r64 = vextq_f32(_r60, _r6_8101214, 2);
float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2);
float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
// stride 2: 4 outputs consume 8 input pixels per row
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
r6 += 8;
outptr += 4;
}
#else
// armv7 path: hand-scheduled inline assembly; same math as the aarch64
// intrinsics above using vld2 deinterleaved loads and three partial
// accumulators (q13-q15) summed at the end. %9 (k0) advances 28 bytes
// per kernel row and is restored with "sub %9, #168" before the store.
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d26-d27}, [%1] \n"// _sum
// "veor q14, q14 \n"// _sum2 = 0;
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
"add %9, #28 \n"
"pld [%2, #512] \n"
"vld2.f32 {d0-d3}, [%2]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
"vmla.f32 q13, q0, d8[0] \n"
"vmul.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%2] \n"// q2 = 8 10 12 14 q3 = 9 11 13 15
"vext.32 q8, q0, q2, #1 \n"// q8 = 2 4 6 8
"vext.32 q9, q1, q3, #1 \n"// q9 = 3 5 7 9
"vmul.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"// q10= 4 6 8 10
"vext.32 q11, q1, q3, #2 \n"// q11= 5 7 9 11
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"// q12= 6 8 10 12
"vmla.f32 q13, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
"add %9, #28 \n"
"pld [%3, #512] \n"
"vld2.f32 {d0-d3}, [%3]! \n"
"vmla.f32 q14, q0, d12[0] \n"
"vmla.f32 q15, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%3] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q13, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
"add %9, #28 \n"
"pld [%4, #512] \n"
"vld2.f32 {d0-d3}, [%4]! \n"
"vmla.f32 q15, q0, d8[0] \n"
"vmla.f32 q13, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%4] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
"add %9, #28 \n"
"pld [%5, #512] \n"
"vld2.f32 {d0-d3}, [%5]! \n"
"vmla.f32 q13, q0, d12[0] \n"
"vmla.f32 q14, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%5] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
"add %9, #28 \n"
"pld [%6, #512] \n"
"vld2.f32 {d0-d3}, [%6]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"vmla.f32 q15, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%6] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
"add %9, #28 \n"
"pld [%7, #512] \n"
"vld2.f32 {d0-d3}, [%7]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"vmla.f32 q13, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%7] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d14[0] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
"sub %9, #168 \n"// restore k0
"pld [%8, #512] \n"
"vld2.f32 {d0-d3}, [%8]! \n"
"vmla.f32 q13, q0, d8[0] \n"
"vmla.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%8] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d11[0] \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q13, q13, q14 \n"
"vst1.f32 {d26-d27}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(k0) // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// scalar tail: one output pixel per iteration (also the whole row
// when NEON is unavailable)
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
// accumulate on top of bias / previous input channels
*outptr += sum;
// stride 2: each output consumes 2 input pixels per row
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
outptr++;
}
// jump over the unconsumed row remainder plus one full row (stride 2
// skips every other input row)
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
update_ops_control_single_target_single.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _USE_SIMD
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif
//void single_qubit_control_single_qubit_dense_matrix_gate_single(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
//void single_qubit_control_single_qubit_dense_matrix_gate_old_single(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
//void single_qubit_control_single_qubit_dense_matrix_gate_old_parallel(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
/* Apply a dense 2x2 matrix gate to target_qubit_index, conditioned on
 * control_qubit_index having value control_value, across a state vector of
 * dimension dim.
 *
 * This is a dispatcher only: it selects the fastest available backend at
 * compile time (SIMD vs. unrolled scalar) and, when OpenMP is enabled, at
 * run time (single-threaded below a size cutoff, multi-threaded above it,
 * since thread startup overhead dominates for small state vectors).
 */
void single_qubit_control_single_qubit_dense_matrix_gate(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
#ifdef _USE_SIMD
#ifdef _OPENMP
    /* smallest state-vector dimension worth parallelizing (2^13) */
    const ITYPE parallel_min_dim = ((ITYPE)1) << 13;
    if (dim >= parallel_min_dim) {
        single_qubit_control_single_qubit_dense_matrix_gate_parallel_simd(control_qubit_index, control_value, target_qubit_index, matrix, state, dim);
    } else {
        single_qubit_control_single_qubit_dense_matrix_gate_single_simd(control_qubit_index, control_value, target_qubit_index, matrix, state, dim);
    }
#else
    single_qubit_control_single_qubit_dense_matrix_gate_single_simd(control_qubit_index, control_value, target_qubit_index, matrix, state, dim);
#endif
#else
#ifdef _OPENMP
    /* smallest state-vector dimension worth parallelizing (2^13) */
    const ITYPE parallel_min_dim = ((ITYPE)1) << 13;
    if (dim >= parallel_min_dim) {
        single_qubit_control_single_qubit_dense_matrix_gate_parallel_unroll(control_qubit_index, control_value, target_qubit_index, matrix, state, dim);
    } else {
        single_qubit_control_single_qubit_dense_matrix_gate_single_unroll(control_qubit_index, control_value, target_qubit_index, matrix, state, dim);
    }
#else
    single_qubit_control_single_qubit_dense_matrix_gate_single_unroll(control_qubit_index, control_value, target_qubit_index, matrix, state, dim);
#endif
#endif
}
// Scalar implementation of the controlled single-qubit dense-matrix update,
// manually unrolled to process two amplitude pairs per iteration whenever
// neither involved qubit is qubit 0. The loop counter is expanded into a
// basis index by inserting zero bits at the two involved qubit positions.
void single_qubit_control_single_qubit_dense_matrix_gate_single_unroll(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
    const ITYPE iter_count = dim / 4;
    const ITYPE mask_target = 1ULL << target_qubit_index;
    const ITYPE mask_control = 1ULL << control_qubit_index;
    const UINT qubit_lo = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT qubit_hi = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE bit_lo = 1ULL << qubit_lo;
    const ITYPE bit_hi = 1ULL << (qubit_hi - 1);
    // Three bit partitions of the loop counter, relative to the two qubits.
    const ITYPE part_low = bit_lo - 1;
    const ITYPE part_mid = (bit_hi - 1) ^ part_low;
    const ITYPE part_high = ~(bit_hi - 1);
    ITYPE iter;
    if (target_qubit_index == 0) {
        // Target pairs are adjacent in memory: update state[b] and state[b+1].
        for (iter = 0; iter < iter_count; ++iter) {
            ITYPE base = (iter & part_low)
                + ((iter & part_mid) << 1)
                + ((iter & part_high) << 2)
                + mask_control * control_value;
            CTYPE amp0 = state[base];
            CTYPE amp1 = state[base + 1];
            state[base] = matrix[0] * amp0 + matrix[1] * amp1;
            state[base + 1] = matrix[2] * amp0 + matrix[3] * amp1;
        }
    }
    else if (control_qubit_index == 0) {
        // Paired amplitudes are `mask_target` apart; one pair per iteration.
        for (iter = 0; iter < iter_count; ++iter) {
            ITYPE base0 = (iter & part_low)
                + ((iter & part_mid) << 1)
                + ((iter & part_high) << 2)
                + mask_control * control_value;
            ITYPE base1 = base0 + mask_target;
            CTYPE amp0 = state[base0];
            CTYPE amp1 = state[base1];
            state[base0] = matrix[0] * amp0 + matrix[1] * amp1;
            state[base1] = matrix[2] * amp0 + matrix[3] * amp1;
        }
    }
    else {
        // Both qubits above 0: consecutive counters map to adjacent bases,
        // so handle two pairs (four amplitudes) per pass.
        for (iter = 0; iter < iter_count; iter += 2) {
            ITYPE base0 = (iter & part_low)
                + ((iter & part_mid) << 1)
                + ((iter & part_high) << 2)
                + mask_control * control_value;
            ITYPE base1 = base0 + mask_target;
            CTYPE amp0 = state[base0];
            CTYPE amp1 = state[base1];
            CTYPE amp2 = state[base0 + 1];
            CTYPE amp3 = state[base1 + 1];
            state[base0] = matrix[0] * amp0 + matrix[1] * amp1;
            state[base1] = matrix[2] * amp0 + matrix[3] * amp1;
            state[base0 + 1] = matrix[0] * amp2 + matrix[1] * amp3;
            state[base1 + 1] = matrix[2] * amp2 + matrix[3] * amp3;
        }
    }
}
#ifdef _OPENMP
// OpenMP-parallel variant of the unrolled scalar implementation. The loop
// bodies are identical to the serial version; distinct loop indices map to
// distinct basis indices (the bit-insertion mapping is injective), so the
// parallel for needs no synchronization.
void single_qubit_control_single_qubit_dense_matrix_gate_parallel_unroll(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;
    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;
    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    // Bit partitions used to expand a loop counter into a basis index with
    // zero bits inserted at the two involved qubit positions.
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);
    ITYPE state_index;
    if (target_qubit_index == 0) {
        // Target amplitudes sit at adjacent indices (basis, basis + 1).
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index = (state_index&low_mask)
                + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2)
                + control_mask * control_value;
            // fetch values
            CTYPE cval0 = state[basis_index];
            CTYPE cval1 = state[basis_index + 1];
            // set values
            state[basis_index] = matrix[0] * cval0 + matrix[1] * cval1;
            state[basis_index + 1] = matrix[2] * cval0 + matrix[3] * cval1;
        }
    }
    else if (control_qubit_index == 0) {
        // One amplitude pair (target_mask apart) per iteration.
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index_0 = (state_index&low_mask)
                + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2)
                + control_mask * control_value;
            ITYPE basis_index_1 = basis_index_0 + target_mask;
            // fetch values
            CTYPE cval0 = state[basis_index_0];
            CTYPE cval1 = state[basis_index_1];
            // set values
            state[basis_index_0] = matrix[0] * cval0 + matrix[1] * cval1;
            state[basis_index_1] = matrix[2] * cval0 + matrix[3] * cval1;
        }
    }
    else {
        // Both qubits above 0: two pairs per iteration (manual unroll by 2).
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index_0 = (state_index&low_mask)
                + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2)
                + control_mask * control_value;
            ITYPE basis_index_1 = basis_index_0 + target_mask;
            // fetch values
            CTYPE cval0 = state[basis_index_0];
            CTYPE cval1 = state[basis_index_1];
            CTYPE cval2 = state[basis_index_0 + 1];
            CTYPE cval3 = state[basis_index_1 + 1];
            // set values
            state[basis_index_0] = matrix[0] * cval0 + matrix[1] * cval1;
            state[basis_index_1] = matrix[2] * cval0 + matrix[3] * cval1;
            state[basis_index_0 + 1] = matrix[0] * cval2 + matrix[1] * cval3;
            state[basis_index_1 + 1] = matrix[2] * cval2 + matrix[3] * cval3;
        }
    }
}
#endif
#ifdef _USE_SIMD
// AVX2 implementation of the controlled single-qubit dense-matrix update
// (serial). A complex amplitude is two adjacent doubles (re, im), so one
// 256-bit register holds two amplitudes. Complex multiplication is built
// from mul + hadd: for a matrix entry m, multiplying by the constant with a
// negated imaginary lane and horizontally adding yields the real part
// (re*re - im*im); the swapped constant yields the imaginary part
// (re*im + im*re).
void single_qubit_control_single_qubit_dense_matrix_gate_single_simd(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;
    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;
    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    // Bit partitions used to expand a loop counter into a basis index with
    // zero bits inserted at the two involved qubit positions.
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);
    ITYPE state_index;
    if (target_qubit_index == 0) {
        // Target pairs are adjacent: one 256-bit load covers both amplitudes.
        // Each constant interleaves two matrix entries (row 0 or row 1).
        __m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0]));
        __m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0]));
        __m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2]));
        __m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2]));
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis = (state_index&low_mask)
                + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2)
                + control_mask * control_value;
            double* ptr = (double*)(state + basis);
            __m256d data = _mm256_loadu_pd(ptr);
            // Row 0 of the matrix applied to (amp0, amp1).
            __m256d data_u0 = _mm256_mul_pd(data, mv00);
            __m256d data_u1 = _mm256_mul_pd(data, mv01);
            __m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1);
            data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
            // Row 1 of the matrix applied to (amp0, amp1).
            __m256d data_d0 = _mm256_mul_pd(data, mv20);
            __m256d data_d1 = _mm256_mul_pd(data, mv21);
            __m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1);
            data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
            // Interleave both results back into (re, im, re, im) order.
            __m256d data_r = _mm256_hadd_pd(data_u2, data_d2);
            data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
            _mm256_storeu_pd(ptr, data_r);
        }
    }
    else if (control_qubit_index == 0) {
        // Strided pair access; falls back to scalar complex arithmetic.
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index_0 = (state_index&low_mask)
                + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2)
                + control_mask * control_value;
            ITYPE basis_index_1 = basis_index_0 + target_mask;
            // fetch values
            CTYPE cval0 = state[basis_index_0];
            CTYPE cval1 = state[basis_index_1];
            // set values
            state[basis_index_0] = matrix[0] * cval0 + matrix[1] * cval1;
            state[basis_index_1] = matrix[2] * cval0 + matrix[3] * cval1;
        }
    }
    else {
        // General case: two adjacent amplitudes at basis_0 pair with the two
        // at basis_1. Each constant broadcasts one matrix entry to both lanes.
        __m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0]));
        __m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0]));
        __m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1]));
        __m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1]));
        __m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2]));
        __m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2]));
        __m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3]));
        __m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3]));
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_0 = (state_index&low_mask)
                + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2)
                + control_mask * control_value;
            ITYPE basis_1 = basis_0 + target_mask;
            double* ptr0 = (double*)(state + basis_0);
            double* ptr1 = (double*)(state + basis_1);
            __m256d data0 = _mm256_loadu_pd(ptr0);
            __m256d data1 = _mm256_loadu_pd(ptr1);
            // r0 = m00*data0 + m01*data1 (complex, per amplitude lane pair)
            __m256d data_u2 = _mm256_mul_pd(data0, mv00);
            __m256d data_u3 = _mm256_mul_pd(data1, mv10);
            __m256d data_u4 = _mm256_mul_pd(data0, mv01);
            __m256d data_u5 = _mm256_mul_pd(data1, mv11);
            __m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4);
            __m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5);
            // r1 = m10*data0 + m11*data1 (complex, per amplitude lane pair)
            __m256d data_d2 = _mm256_mul_pd(data0, mv20);
            __m256d data_d3 = _mm256_mul_pd(data1, mv30);
            __m256d data_d4 = _mm256_mul_pd(data0, mv21);
            __m256d data_d5 = _mm256_mul_pd(data1, mv31);
            __m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4);
            __m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5);
            __m256d data_r0 = _mm256_add_pd(data_u6, data_u7);
            __m256d data_r1 = _mm256_add_pd(data_d6, data_d7);
            _mm256_storeu_pd(ptr0, data_r0);
            _mm256_storeu_pd(ptr1, data_r1);
        }
    }
}
#ifdef _OPENMP
// OpenMP-parallel variant of the AVX2 implementation. Loop bodies are
// identical to the serial SIMD version; distinct loop indices map to
// distinct basis indices, so the parallel for needs no synchronization.
// See the serial SIMD variant for the mul + hadd complex-multiply scheme.
void single_qubit_control_single_qubit_dense_matrix_gate_parallel_simd(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;
    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;
    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    // Bit partitions used to expand a loop counter into a basis index with
    // zero bits inserted at the two involved qubit positions.
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);
    ITYPE state_index;
    if (target_qubit_index == 0) {
        // Target pairs are adjacent: one 256-bit load covers both amplitudes.
        __m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0]));
        __m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0]));
        __m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2]));
        __m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2]));
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis = (state_index&low_mask)
                + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2)
                + control_mask * control_value;
            double* ptr = (double*)(state + basis);
            __m256d data = _mm256_loadu_pd(ptr);
            __m256d data_u0 = _mm256_mul_pd(data, mv00);
            __m256d data_u1 = _mm256_mul_pd(data, mv01);
            __m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1);
            data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
            __m256d data_d0 = _mm256_mul_pd(data, mv20);
            __m256d data_d1 = _mm256_mul_pd(data, mv21);
            __m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1);
            data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
            __m256d data_r = _mm256_hadd_pd(data_u2, data_d2);
            data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
            _mm256_storeu_pd(ptr, data_r);
        }
    }
    else if (control_qubit_index == 0) {
        // Strided pair access; falls back to scalar complex arithmetic.
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index_0 = (state_index&low_mask)
                + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2)
                + control_mask * control_value;
            ITYPE basis_index_1 = basis_index_0 + target_mask;
            // fetch values
            CTYPE cval0 = state[basis_index_0];
            CTYPE cval1 = state[basis_index_1];
            // set values
            state[basis_index_0] = matrix[0] * cval0 + matrix[1] * cval1;
            state[basis_index_1] = matrix[2] * cval0 + matrix[3] * cval1;
        }
    }
    else {
        // General case: each constant broadcasts one matrix entry to both
        // amplitude lanes; two amplitudes per basis are handled at once.
        __m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0]));
        __m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0]));
        __m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1]));
        __m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1]));
        __m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2]));
        __m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2]));
        __m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3]));
        __m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3]));
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_0 = (state_index&low_mask)
                + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2)
                + control_mask * control_value;
            ITYPE basis_1 = basis_0 + target_mask;
            double* ptr0 = (double*)(state + basis_0);
            double* ptr1 = (double*)(state + basis_1);
            __m256d data0 = _mm256_loadu_pd(ptr0);
            __m256d data1 = _mm256_loadu_pd(ptr1);
            // r0 = m00*data0 + m01*data1 (complex, per amplitude lane pair)
            __m256d data_u2 = _mm256_mul_pd(data0, mv00);
            __m256d data_u3 = _mm256_mul_pd(data1, mv10);
            __m256d data_u4 = _mm256_mul_pd(data0, mv01);
            __m256d data_u5 = _mm256_mul_pd(data1, mv11);
            __m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4);
            __m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5);
            // r1 = m10*data0 + m11*data1 (complex, per amplitude lane pair)
            __m256d data_d2 = _mm256_mul_pd(data0, mv20);
            __m256d data_d3 = _mm256_mul_pd(data1, mv30);
            __m256d data_d4 = _mm256_mul_pd(data0, mv21);
            __m256d data_d5 = _mm256_mul_pd(data1, mv31);
            __m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4);
            __m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5);
            __m256d data_r0 = _mm256_add_pd(data_u6, data_u7);
            __m256d data_r1 = _mm256_add_pd(data_d6, data_d7);
            _mm256_storeu_pd(ptr0, data_r0);
            _mm256_storeu_pd(ptr1, data_r1);
        }
    }
}
#endif
#endif
/*
void single_qubit_control_single_qubit_dense_matrix_gate_old_single(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	// loop variables
const ITYPE loop_dim = dim >> 2;
ITYPE state_index;
// mask
const ITYPE target_mask = 1ULL << target_qubit_index;
const ITYPE control_mask = (1ULL << control_qubit_index) * control_value;
// insert index
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << max_qubit_index;
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create base index
ITYPE basis_c_t0 = state_index;
basis_c_t0 = insert_zero_to_basis_index(basis_c_t0, min_qubit_mask, min_qubit_index);
basis_c_t0 = insert_zero_to_basis_index(basis_c_t0, max_qubit_mask, max_qubit_index);
// flip control
basis_c_t0 ^= control_mask;
// gather index
ITYPE basis_c_t1 = basis_c_t0 ^ target_mask;
// fetch values
CTYPE cval_c_t0 = state[basis_c_t0];
CTYPE cval_c_t1 = state[basis_c_t1];
// set values
state[basis_c_t0] = matrix[0] * cval_c_t0 + matrix[1] * cval_c_t1;
state[basis_c_t1] = matrix[2] * cval_c_t0 + matrix[3] * cval_c_t1;
}
}
#ifdef _OPENMP
void single_qubit_control_single_qubit_dense_matrix_gate_old_parallel(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	// loop variables
const ITYPE loop_dim = dim >> 2;
ITYPE state_index;
// mask
const ITYPE target_mask = 1ULL << target_qubit_index;
const ITYPE control_mask = (1ULL << control_qubit_index) * control_value;
// insert index
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << max_qubit_index;
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create base index
ITYPE basis_c_t0 = state_index;
basis_c_t0 = insert_zero_to_basis_index(basis_c_t0, min_qubit_mask, min_qubit_index);
basis_c_t0 = insert_zero_to_basis_index(basis_c_t0, max_qubit_mask, max_qubit_index);
// flip control
basis_c_t0 ^= control_mask;
// gather index
ITYPE basis_c_t1 = basis_c_t0 ^ target_mask;
// fetch values
CTYPE cval_c_t0 = state[basis_c_t0];
CTYPE cval_c_t1 = state[basis_c_t1];
// set values
state[basis_c_t0] = matrix[0] * cval_c_t0 + matrix[1] * cval_c_t1;
state[basis_c_t1] = matrix[2] * cval_c_t0 + matrix[3] * cval_c_t1;
}
}
#endif
void single_qubit_control_single_qubit_dense_matrix_gate_single(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 4;
const ITYPE target_mask = 1ULL << target_qubit_index;
const ITYPE control_mask = 1ULL << control_qubit_index;
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
const ITYPE low_mask = min_qubit_mask - 1;
const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
const ITYPE high_mask = ~(max_qubit_mask - 1);
ITYPE state_index;
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index_0 = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ control_mask * control_value;
ITYPE basis_index_1 = basis_index_0 + target_mask;
// fetch values
CTYPE cval0 = state[basis_index_0];
CTYPE cval1 = state[basis_index_1];
// set values
state[basis_index_0] = matrix[0] * cval0 + matrix[1] * cval1;
state[basis_index_1] = matrix[2] * cval0 + matrix[3] * cval1;
}
}
*/
|
TransformController.h | //
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
#pragma once
#include <set>
#include "Transformer.h"
#include "SequenceEnumerator.h"
namespace Microsoft { namespace MSR { namespace CNTK {
// A pair of a transformer and the name of the stream to which the
// transformer should be applied.
struct Transformation
{
    TransformerPtr m_transformer;  // The transformer to run.
    std::wstring m_streamName;     // Name of the stream it applies to.
};
// A class responsible for applying a list of transformers to sequences and
// stream descriptions. Delegates retrieval of sequences to another sequence
// provider (such as a randomizer) and applies transformations after retrieving.
// Usually used by the packer to get the next set of sequences.
class TransformController : public SequenceEnumerator
{
public:
    // Builds the transformation pipeline and computes the transformed stream
    // descriptions, i.e. a transformation can change a stream from dense to
    // sparse.
    TransformController(const std::vector<Transformation>& transformations, SequenceEnumeratorPtr sequenceProvider)
        : m_sequenceProvider(sequenceProvider)
    {
        std::vector<StreamDescriptionPtr> transformedStreams = m_sequenceProvider->GetStreamDescriptions();
        for (auto& t : transformations)
        {
            size_t streamId = GetStreamId(t.m_streamName, transformedStreams);
            m_transformations.push_back(std::make_pair(t, streamId));
            transformedStreams[streamId] = std::make_shared<StreamDescription>(t.m_transformer->Transform(*transformedStreams[streamId]));
        }
        m_outputStreams = transformedStreams;
    }

    // Sets configuration for the current epoch.
    // Some transformers can change their config based on the epoch.
    virtual void StartEpoch(const EpochConfiguration &config) override
    {
        assert(m_sequenceProvider != nullptr);
        for (auto& t : m_transformations)
        {
            t.first.m_transformer->StartEpoch(config);
        }
        m_sequenceProvider->StartEpoch(config);
    }

    // Description of streams that the transformer provides.
    virtual std::vector<StreamDescriptionPtr> GetStreamDescriptions() const override
    {
        return m_outputStreams;
    }

    // Gets next sequences up to a maximum count of samples,
    // applying transformers to particular streams.
    virtual Sequences GetNextSequences(size_t sampleCount) override
    {
        assert(m_sequenceProvider != nullptr);
        Sequences sequences = m_sequenceProvider->GetNextSequences(sampleCount);
        if (sequences.m_data.empty())
        {
            return sequences;
        }

        // Hoist the count and cast once: OpenMP needs a signed loop variable,
        // and comparing int against size_t mixes signedness on every iteration.
        const int numSequences = static_cast<int>(sequences.m_data.front().size());
#pragma omp parallel for schedule(dynamic)
        for (int j = 0; j < numSequences; ++j)
        {
            // Each sequence is independent, so the columns can be transformed
            // in parallel; transformers touch only their own stream row.
            for (auto& t : m_transformations)
            {
                sequences.m_data[t.second][j] = t.first.m_transformer->Transform(sequences.m_data[t.second][j]);
            }
        }

        return sequences;
    }

private:
    // Returns the id of the stream with the given name, or fails with a
    // LogicError when no such stream exists. The name is taken by const
    // reference to avoid copying a wstring on every lookup.
    size_t GetStreamId(const std::wstring& streamName, const std::vector<StreamDescriptionPtr>& streams) const
    {
        for (const auto& s : streams)
        {
            if (s->m_name == streamName)
            {
                return s->m_id;
            }
        }

        assert(false);
        LogicError("Unexpected stream specified for transformation.");
    }

    SequenceEnumeratorPtr m_sequenceProvider;           // Upstream provider of raw sequences.
    std::vector<StreamDescriptionPtr> m_outputStreams;  // Stream descriptions after transformation.
    std::vector<std::pair<Transformation, size_t>> m_transformations;  // Transformation plus target stream id.
};
}}}
|
DarthTon.h | #ifndef DARTHTON_H
#define DARTHTON_H
// Boyer-Moore-Horspool with wildcards implementation
// Builds the Boyer-Moore-Horspool bad-character shift table for a pattern
// that may contain wildcard bytes. The default shift is the distance from
// the last wildcard to the end of the pattern (at least 1); bytes occurring
// in the wildcard-free tail get their distance to the pattern end instead.
void FillShiftTable( const uint8_t* pPattern, size_t patternSize, uint8_t wildcard, size_t* bad_char_skip )
{
    const size_t last = patternSize - 1;

    // Locate the last wildcard (position 0 if the pattern has none).
    size_t wildcardPos = last;
    while (wildcardPos > 0 && pPattern[wildcardPos] != wildcard)
        --wildcardPos;

    size_t defaultShift = last - wildcardPos;
    if (defaultShift == 0)
        defaultShift = 1;

    // Every byte value starts with the default shift.
    for (size_t i = 0; i <= UCHAR_MAX; ++i)
        bad_char_skip[i] = defaultShift;

    // Bytes inside the tail shift only up to their own position.
    for (size_t i = last - defaultShift; i < last; ++i)
        bad_char_skip[pPattern[i]] = last - i;
}
// Boyer-Moore-Horspool scan of [pScanPos, pScanPos + scanSize) for pPattern,
// where pattern bytes equal to `wildcard` match any input byte.
// Returns a pointer to the first match, or nullptr if there is none.
const void* Search( const uint8_t* pScanPos, size_t scanSize, const uint8_t* pPattern, size_t patternSize, uint8_t wildcard )
{
    // Guard degenerate inputs: an empty pattern previously read pScanPos[-1],
    // and patternSize > scanSize made scanEnd underflow past the buffer.
    if (patternSize == 0 || patternSize > scanSize)
        return nullptr;

    size_t bad_char_skip[UCHAR_MAX + 1];
    const uint8_t* scanEnd = pScanPos + scanSize - patternSize;
    const intptr_t last = static_cast<intptr_t>(patternSize) - 1;

    FillShiftTable( pPattern, patternSize, wildcard, bad_char_skip );

    // Compare right-to-left; on mismatch skip ahead by the shift assigned to
    // the input byte aligned with the end of the pattern.
    for (; pScanPos <= scanEnd; pScanPos += bad_char_skip[pScanPos[last]])
    {
        intptr_t idx = last;
        while (idx >= 0 && (pPattern[idx] == wildcard || pScanPos[idx] == pPattern[idx]))
            --idx;
        if (idx < 0)
            return pScanPos;  // every position matched
    }

    return nullptr;
}
// Benchmark wrapper around the Boyer-Moore-Horspool wildcard scanner.
// 0xCC bytes inside the pattern act as wildcards.
struct DARTH_TON : public BenchBase
{
    // Selects the byte pattern for the requested test case; unknown test
    // ids leave the previous pattern untouched.
    virtual void init( Tests test )
    {
        if (test == Tests::First)
        {
            pattern = "\x45\x43\x45\x55\x33\x9a\xfa\xCC\xCC\xCC\xCC\x45\x68\x21";
        }
        else if (test == Tests::Second)
        {
            pattern = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xaa\xCC\xCC\xCC\xCC\x45\x68\x21";
        }
    }

    // Runs one scan over [baseAddress, baseAddress + size).
    virtual LPVOID runOne( PBYTE baseAddress, DWORD size )
    {
        return const_cast<LPVOID>(Search( baseAddress, size, reinterpret_cast<const uint8_t*>(pattern), strlen( pattern ), 0xCC ));
    }

    virtual const char* name() const
    {
        return "DarthTon";
    }

    const char* pattern = nullptr;  // Currently selected pattern (not owned).
};
REGISTER( DARTH_TON );
// One 16-byte chunk of the search pattern: the needle register plus a
// bitmask with a bit set for every byte that must match exactly
// (the 'x' positions of the mask string).
struct PartData
{
    int32_t mask = 0;
    __m128i needle = { 0 };
};
// SSE4.2/AVX2 pattern scanner (DarthTon v2): splits the pattern into 16-byte
// parts, locates a candidate offset with _mm_cmpestri on the leading
// exact-match bytes, then verifies every part against its 'x'-position
// bitmask.
// NOTE(review): `result` is written from inside an `omp parallel for` without
// synchronization, and `break` inside a worksharing loop is non-conforming
// OpenMP; this only behaves as intended when the pragma is disabled at build
// time - confirm build flags.
// NOTE(review): `parts[4]` overflows for masks longer than 64 bytes, the
// final `size % 32` tail bytes are never scanned, and the `- 1` bound in the
// mask loop skips the last byte of each part - looks like an off-by-one,
// confirm intent.
const void* Search( const uint8_t* data, const uint32_t size, const uint8_t* pattern, const char* mask )
{
    const uint8_t* result = nullptr;
    auto len = strlen( mask );
    // The first '?' bounds the run of leading exact bytes used as the
    // cmpestri prefilter (at most 16 of them).
    auto first = strchr( mask, '?' );
    size_t len2 = (first != nullptr) ? (first - mask) : len;
    auto firstlen = min( len2, 16 );
    intptr_t num_parts = (len < 16 || len % 16) ? (len / 16 + 1) : (len / 16);
    PartData parts[4];

    for (intptr_t i = 0; i < num_parts; ++i, len -= 16)
    {
        // Set a mask bit for every byte of this part that must match exactly.
        for (size_t j = 0; j < min( len, 16 ) - 1; ++j)
            if (mask[16 * i + j] == 'x')
                _bittestandset( (LONG*)&parts[i].mask, j );

        parts[i].needle = _mm_loadu_si128( (const __m128i*)(pattern + i * 16) );
    }

#pragma omp parallel for
    for (intptr_t i = 0; i < static_cast<intptr_t>(size) / 32 - 1; ++i)
    {
        auto block = _mm256_loadu_si256( (const __m256i*)data + i );
        // Fast path: skip 32-byte blocks that are entirely zero.
        if (_mm256_testz_si256( block, block ))
            continue;

        // Find the leading exact bytes in each 16-byte half of the window;
        // an index of 16 means "not found in this half".
        auto offset = _mm_cmpestri( parts->needle, firstlen, _mm_loadu_si128( (const __m128i*)(data + i * 32) ), 16, _SIDD_CMP_EQUAL_ORDERED );
        if (offset == 16)
        {
            offset += _mm_cmpestri( parts->needle, firstlen, _mm_loadu_si128( (const __m128i*)(data + i * 32 + 16) ), 16, _SIDD_CMP_EQUAL_ORDERED );
            if (offset == 32)
                continue;
        }

        // Verify each 16-byte part: byte-compare and require equality at all
        // positions flagged in the precomputed bitmask.
        for (intptr_t j = 0; j < num_parts; ++j)
        {
            auto hay = _mm_loadu_si128( (const __m128i*)(data + (2 * i + j) * 16 + offset) );
            auto bitmask = _mm_movemask_epi8( _mm_cmpeq_epi8( hay, parts[j].needle ) );
            if ((bitmask & parts[j].mask) != parts[j].mask)
                goto next;
        }

        result = data + 32 * i + offset;
        break;

    next:;
    }

    return result;
}
// Benchmark wrapper around the SSE/AVX mask-based scanner (DarthTon v2).
// '?' in the mask marks wildcard bytes; 'x' marks exact-match bytes.
struct DARTH_TON2 : public BenchBase
{
    // Selects the pattern/mask pair for the requested test case; unknown
    // test ids leave the previous selection untouched.
    virtual void init( Tests test )
    {
        if (test == Tests::First)
        {
            pattern = "\x45\x43\x45\x55\x33\x9a\xfa\x00\x00\x00\x00\x45\x68\x21";
            mask = "xxxxxxx????xxx";
        }
        else if (test == Tests::Second)
        {
            pattern = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xaa\x00\x00\x00\x00\x45\x68\x21";
            mask = "xxxxxxxxxxx????xxx";
        }
    }

    // Runs one scan over [baseAddress, baseAddress + size).
    virtual LPVOID runOne( PBYTE baseAddress, DWORD size )
    {
        return const_cast<LPVOID>(Search( baseAddress, size, reinterpret_cast<const uint8_t*>(pattern), mask ));
    }

    virtual const char* name() const
    {
        return "DarthTon v2";
    }

    const char* pattern = nullptr;  // Currently selected pattern (not owned).
    const char* mask = nullptr;     // Matching mask string (not owned).
};
#endif // DARTHTON_H
|
schur_eliminator_impl.h | // Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// TODO(sameeragarwal): row_block_counter can perhaps be replaced by
// Chunk::start ?
#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
#define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
// Eigen has an internal threshold switching between different matrix
// multiplication algorithms. In particular for matrices larger than
// EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly
// matrix matrix product algorithm that has a higher setup cost. For
// matrix sizes close to this threshold, especially when the matrices
// are thin and long, the default choice may not be optimal. This is
// the case for us, as the default choice causes a 30% performance
// regression when we moved from Eigen2 to Eigen3.
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#include <algorithm>
#include <map>
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/invert_psd_matrix.h"
#include "ceres/map_util.h"
#include "ceres/schur_eliminator.h"
#include "ceres/scoped_thread_token.h"
#include "ceres/small_blas.h"
#include "ceres/stl_util.h"
#include "ceres/thread_token_provider.h"
#include "Eigen/Dense"
#include "glog/logging.h"
#if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
#include "ceres/parallel_for.h"
#endif
namespace ceres {
namespace internal {
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
  // Free the per-row-block mutexes allocated in Init().
  STLDeleteElements(&rhs_locks_);
}
// Precomputes the structures used by Eliminate(): the row layout of the
// reduced (Schur complement) linear system, the contiguous row "chunks"
// that share the same e-block, and per-thread scratch buffers sized for
// the largest chunk.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Init(
    int num_eliminate_blocks,
    bool assume_full_rank_ete,
    const CompressedRowBlockStructure* bs) {
  CHECK_GT(num_eliminate_blocks, 0)
      << "SchurComplementSolver cannot be initialized with "
      << "num_eliminate_blocks = 0.";

  num_eliminate_blocks_ = num_eliminate_blocks;
  assume_full_rank_ete_ = assume_full_rank_ete;

  const int num_col_blocks = bs->cols.size();
  const int num_row_blocks = bs->rows.size();

  buffer_size_ = 1;
  chunks_.clear();
  lhs_row_layout_.clear();

  int lhs_num_rows = 0;
  // Add a map object for each block in the reduced linear system
  // and build the row/column block structure of the reduced linear
  // system.
  lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_);
  for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
    lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows;
    lhs_num_rows += bs->cols[i].size;
  }

  int r = 0;
  // Iterate over the row blocks of A, and detect the chunks. The
  // matrix should already have been ordered so that all rows
  // containing the same y block are vertically contiguous. Along
  // the way also compute the amount of space each chunk will need
  // to perform the elimination.
  while (r < num_row_blocks) {
    const int chunk_block_id = bs->rows[r].cells.front().block_id;
    // Rows whose first cell is not an e-block start the uneliminated
    // tail of the matrix; no further chunks exist past this point.
    if (chunk_block_id >= num_eliminate_blocks_) {
      break;
    }

    chunks_.push_back(Chunk());
    Chunk& chunk = chunks_.back();
    chunk.size = 0;
    chunk.start = r;
    int block_row_size = 0;
    int buffer_size = 0;
    const int e_block_size = bs->cols[chunk_block_id].size;

    // Add to the chunk until the first block in the row is
    // different than the one in the first row for the chunk.
    while (r + chunk.size < num_row_blocks) {
      const CompressedRow& row = bs->rows[r + chunk.size];
      if (row.cells.front().block_id != chunk_block_id) {
        break;
      }

      // Iterate over the blocks in the row, ignoring the first
      // block since it is the one to be eliminated.
      for (int c = 1; c < row.cells.size(); ++c) {
        const Cell& cell = row.cells[c];
        // First occurrence of an f-block in this chunk reserves its
        // slice of the scratch buffer.
        if (InsertIfNotPresent(
                &(chunk.buffer_layout), cell.block_id, buffer_size)) {
          buffer_size += e_block_size * bs->cols[cell.block_id].size;
        }
      }

      buffer_size_ = std::max(buffer_size, buffer_size_);
      block_row_size += row.block.size;
      ++chunk.size;
    }

    CHECK_GT(chunk.size, 0);
    block_row_size_.push_back(block_row_size);
    r += chunk.size;
  }

  const Chunk& chunk = chunks_.back();
  uneliminated_row_begins_ = chunk.start + chunk.size;
  // if (num_threads_ > 1) {
  //   random_shuffle(chunks_.begin(), chunks_.end());
  // }

  // One buffer_size_-sized scratch area per thread.
  buffer_.reset(new double[buffer_size_ * num_threads_]);

  // chunk_outer_product_buffer_ only needs to store e_block_size *
  // f_block_size, which is always less than buffer_size_, so we just
  // allocate buffer_size_ per thread.
  chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]);

  STLDeleteElements(&rhs_locks_);
  rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
  for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
    rhs_locks_[i] = new Mutex;
  }
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Eliminate(const BlockSparseMatrix* A,
          const double* b,
          const double* D,
          BlockRandomAccessMatrix* lhs,
          double* rhs) {
  // Form the reduced (Schur complement) system
  //
  //   lhs = F'F - F'E(E'E)^{-1}E'F
  //   rhs = F'b - F'E(E'E)^{-1}E'b
  //
  // by eliminating the e_blocks one chunk at a time. D, if non-NULL,
  // is a diagonal whose squared entries are added to the normal
  // equations (Levenberg-Marquardt style regularization).
  if (lhs->num_rows() > 0) {
    lhs->SetZero();
    VectorRef(rhs, lhs->num_rows()).setZero();
  }
  const CompressedRowBlockStructure* bs = A->block_structure();
  const int num_col_blocks = bs->cols.size();
  // Add the diagonal to the schur complement.
  if (D != NULL) {
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif  // CERES_USE_OPENMP
#if !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
    for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
#else
    ParallelFor(context_, num_eliminate_blocks_, num_col_blocks, num_threads_,
                [&](int i) {
#endif  // !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
      const int block_id = i - num_eliminate_blocks_;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block_id, block_id,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block_size = bs->cols[i].size;
        typename EigenTypes<Eigen::Dynamic>::ConstVectorRef
            diag(D + bs->cols[i].position, block_size);
        // Lock the cell; concurrent iterations may touch the same
        // cell of the block random access matrix.
        CeresMutexLock l(&cell_info->m);
        MatrixRef m(cell_info->values, row_stride, col_stride);
        m.block(r, c, block_size, block_size).diagonal()
            += diag.array().square().matrix();
      }
    }
#if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
    );
#endif  // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
  }
#if !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
  ThreadTokenProvider thread_token_provider(num_threads_);
#endif  // !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
#ifdef CERES_USE_OPENMP
  // Eliminate y blocks one chunk at a time. For each chunk, compute
  // the entries of the normal equations and the gradient vector block
  // corresponding to the y block and then apply Gaussian elimination
  // to them. The matrix ete stores the normal matrix corresponding to
  // the block being eliminated and array buffer_ contains the
  // non-zero blocks in the row corresponding to this y block in the
  // normal equations. This computation is done in
  // ChunkDiagonalBlockAndGradient. UpdateRhs then applies gaussian
  // elimination to the rhs of the normal equations, updating the rhs
  // of the reduced linear system by modifying rhs blocks for all the
  // z blocks that share a row block/residual term with the y
  // block. EliminateRowOuterProduct does the corresponding operation
  // for the lhs of the reduced linear system.
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif  // CERES_USE_OPENMP
#if !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
  for (int i = 0; i < chunks_.size(); ++i) {
    const ScopedThreadToken scoped_thread_token(&thread_token_provider);
    const int thread_id = scoped_thread_token.token();
#else
  ParallelFor(context_,
              0,
              int(chunks_.size()),
              num_threads_,
              [&](int thread_id, int i) {
#endif  // !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
    // Per-thread scratch region for the E'F products of this chunk.
    double* buffer = buffer_.get() + thread_id * buffer_size_;
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_size = bs->cols[e_block_id].size;
    VectorRef(buffer, buffer_size_).setZero();
    // ete accumulates E'E for this chunk, seeded with D^2 if present.
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
        ete(e_block_size, e_block_size);
    if (D != NULL) {
      const typename EigenTypes<kEBlockSize>::ConstVectorRef
          diag(D + bs->cols[e_block_id].position, e_block_size);
      ete = diag.array().square().matrix().asDiagonal();
    } else {
      ete.setZero();
    }
    // g accumulates the gradient block for the e_block: g = E'b.
    FixedArray<double, 8> g(e_block_size);
    typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
    gref.setZero();
    // We are going to be computing
    //
    //   S += F'F - F'E(E'E)^{-1}E'F
    //
    // for each Chunk. The computation is broken down into a number of
    // function calls as below.
    // Compute the outer product of the e_blocks with themselves (ete
    // = E'E). Compute the product of the e_blocks with the
    // corresponding f_blocks (buffer = E'F), the gradient of the terms
    // in this chunk (g) and add the outer product of the f_blocks to
    // Schur complement (S += F'F).
    ChunkDiagonalBlockAndGradient(
        chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);
    // Normally one wouldn't compute the inverse explicitly, but
    // e_block_size will typically be a small number like 3, in
    // which case its much faster to compute the inverse once and
    // use it to multiply other matrices/vectors instead of doing a
    // Solve call over and over again.
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
        InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete);
    // For the current chunk compute and update the rhs of the reduced
    // linear system.
    //
    //   rhs = F'b - F'E(E'E)^(-1) E'b
    FixedArray<double, 8> inverse_ete_g(e_block_size);
    MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
        inverse_ete.data(),
        e_block_size,
        e_block_size,
        g.get(),
        inverse_ete_g.get());
    UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);
    // S -= F'E(E'E)^{-1}E'F
    ChunkOuterProduct(
        thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
  }
#if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
  );
#endif  // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
  // For rows with no e_blocks, the schur complement update reduces to
  // S += F'F.
  NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
BackSubstitute(const BlockSparseMatrix* A,
               const double* b,
               const double* D,
               const double* z,
               double* y) {
  // Given the solution z of the reduced system, recover the
  // eliminated variables one chunk at a time:
  //
  //   y = (E'E)^{-1} E'(b - F z)
  const CompressedRowBlockStructure* bs = A->block_structure();
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif  // CERES_USE_OPENMP
#if !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
  for (int i = 0; i < chunks_.size(); ++i) {
#else
  ParallelFor(context_, 0, int(chunks_.size()), num_threads_, [&](int i) {
#endif  // !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_size = bs->cols[e_block_id].size;
    double* y_ptr = y + bs->cols[e_block_id].position;
    typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);
    // ete accumulates E'E for this chunk, seeded with D^2 if present.
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
        ete(e_block_size, e_block_size);
    if (D != NULL) {
      const typename EigenTypes<kEBlockSize>::ConstVectorRef
          diag(D + bs->cols[e_block_id].position, e_block_size);
      ete = diag.array().square().matrix().asDiagonal();
    } else {
      ete.setZero();
    }
    const double* values = A->values();
    for (int j = 0; j < chunk.size; ++j) {
      const CompressedRow& row = bs->rows[chunk.start + j];
      const Cell& e_cell = row.cells.front();
      DCHECK_EQ(e_block_id, e_cell.block_id);
      // sj = b_j - F_j z : residual of this row w.r.t. the f_blocks.
      FixedArray<double, 8> sj(row.block.size);
      typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) =
          typename EigenTypes<kRowBlockSize>::ConstVectorRef
          (b + bs->rows[chunk.start + j].block.position, row.block.size);
      for (int c = 1; c < row.cells.size(); ++c) {
        const int f_block_id = row.cells[c].block_id;
        const int f_block_size = bs->cols[f_block_id].size;
        const int r_block = f_block_id - num_eliminate_blocks_;
        MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
            values + row.cells[c].position, row.block.size, f_block_size,
            z + lhs_row_layout_[r_block],
            sj.get());
      }
      // y += E_j' sj  and  ete += E_j' E_j.
      MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
          values + e_cell.position, row.block.size, e_block_size,
          sj.get(),
          y_ptr);
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
              values + e_cell.position, row.block.size, e_block_size,
              values + e_cell.position, row.block.size, e_block_size,
              ete.data(), 0, 0, e_block_size, e_block_size);
    }
    // Finish the solve: y_block = (E'E)^{-1} E'(b - F z).
    y_block = InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete)
        * y_block;
  }
#if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
  );
#endif  // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
}
// Update the rhs of the reduced linear system. Compute
//
//   rhs += F'b - F'E(E'E)^(-1) E'b
//
// for the rows of this chunk. inverse_ete_g is the precomputed
// vector (E'E)^{-1} E'b for the chunk's e_block.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhs(const Chunk& chunk,
          const BlockSparseMatrix* A,
          const double* b,
          int row_block_counter,
          const double* inverse_ete_g,
          double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
  const int e_block_size = bs->cols[e_block_id].size;
  int b_pos = bs->rows[row_block_counter].block.position;
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];
    const Cell& e_cell = row.cells.front();
    // sj = b_j - E_j (E'E)^{-1} E'b
    typename EigenTypes<kRowBlockSize>::Vector sj =
        typename EigenTypes<kRowBlockSize>::ConstVectorRef
        (b + b_pos, row.block.size);
    MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
        values + e_cell.position, row.block.size, e_block_size,
        inverse_ete_g, sj.data());
    // rhs += F_j' sj. Each rhs block is guarded by its own lock since
    // other chunks (possibly running concurrently) update the same block.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      CeresMutexLock l(rhs_locks_[block]);
      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
          values + row.cells[c].position,
          row.block.size, block_size,
          sj.data(), rhs + lhs_row_layout_[block]);
    }
    b_pos += row.block.size;
  }
}
// Given a Chunk - set of rows with the same e_block, e.g. in the
// following Chunk with two rows.
//
//                E                   F
//      [ y11   0   0   0 |  z11     0    0   0 z51]
//      [ y12   0   0   0 |  z12   z22    0   0   0]
//
// this function computes two matrices. The diagonal block matrix
//
//   ete = y11 * y11' + y12 * y12'
//
// and the off diagonal blocks in the Gauss Newton Hessian.
//
//   buffer = [y11'(z11 + z12), y12' * z22, y11' * z51]
//
// which are zero compressed versions of the block sparse matrices E'E
// and E'F.
//
// and the gradient of the e_block, E'b.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockAndGradient(
    const Chunk& chunk,
    const BlockSparseMatrix* A,
    const double* b,
    int row_block_counter,
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
    double* g,
    double* buffer,
    BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  int b_pos = bs->rows[row_block_counter].block.position;
  const int e_block_size = ete->rows();
  // Iterate over the rows in this chunk, for each row, compute the
  // contribution of its F blocks to the Schur complement, the
  // contribution of its E block to the matrix E'E (ete), and the
  // corresponding block in the gradient vector.
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];
    // Rows with f_blocks contribute F'F to the Schur complement.
    if (row.cells.size() > 1) {
      EBlockRowOuterProduct(A, row_block_counter + j, lhs);
    }
    // Extract the e_block, ETE += E_i' E_i
    const Cell& e_cell = row.cells.front();
    MatrixTransposeMatrixMultiply
        <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
            values + e_cell.position, row.block.size, e_block_size,
            values + e_cell.position, row.block.size, e_block_size,
            ete->data(), 0, 0, e_block_size, e_block_size);
    // g += E_i' b_i
    MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
        values + e_cell.position, row.block.size, e_block_size,
        b + b_pos,
        g);
    // buffer = E'F. This computation is done by iterating over the
    // f_blocks for each row in the chunk. buffer_layout maps each
    // f_block id to its offset in the scratch buffer.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int f_block_id = row.cells[c].block_id;
      const int f_block_size = bs->cols[f_block_id].size;
      double* buffer_ptr =
          buffer + FindOrDie(chunk.buffer_layout, f_block_id);
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + e_cell.position, row.block.size, e_block_size,
              values + row.cells[c].position, row.block.size, f_block_size,
              buffer_ptr, 0, 0, e_block_size, f_block_size);
    }
    b_pos += row.block.size;
  }
}
// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
// Schur complement matrix, i.e
//
//   S -= F'E(E'E)^{-1}E'F.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkOuterProduct(int thread_id,
                  const CompressedRowBlockStructure* bs,
                  const Matrix& inverse_ete,
                  const double* buffer,
                  const BufferLayoutType& buffer_layout,
                  BlockRandomAccessMatrix* lhs) {
  // This is the most computationally expensive part of this
  // code. Profiling experiments reveal that the bottleneck is not the
  // computation of the right-hand matrix product, but memory
  // references to the left hand side.
  const int e_block_size = inverse_ete.rows();
  BufferLayoutType::const_iterator it1 = buffer_layout.begin();
  // Per-thread scratch area holding b1' * ete^{-1} for the current block.
  double* b1_transpose_inverse_ete =
      chunk_outer_product_buffer_.get() + thread_id * buffer_size_;
  // S(i,j) -= bi' * ete^{-1} b_j
  for (; it1 != buffer_layout.end(); ++it1) {
    const int block1 = it1->first - num_eliminate_blocks_;
    const int block1_size = bs->cols[it1->first].size;
    MatrixTransposeMatrixMultiply
        <kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
            buffer + it1->second, e_block_size, block1_size,
            inverse_ete.data(), e_block_size, e_block_size,
            b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);
    // Only the upper triangular part of S is updated (it2 starts at it1).
    BufferLayoutType::const_iterator it2 = it1;
    for (; it2 != buffer_layout.end(); ++it2) {
      const int block2 = it2->first - num_eliminate_blocks_;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[it2->first].size;
        // Lock the cell: other chunks may update the same cell concurrently.
        CeresMutexLock l(&cell_info->m);
        MatrixMatrixMultiply
            <kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
                b1_transpose_inverse_ete, block1_size, e_block_size,
                buffer + it2->second, e_block_size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}
// For rows with no e_blocks, the schur complement update reduces to S
// += F'F. This function iterates over the rows of A with no e_block,
// and calls NoEBlockRowOuterProduct on each row.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowsUpdate(const BlockSparseMatrix* A,
                   const double* b,
                   int row_block_counter,
                   BlockRandomAccessMatrix* lhs,
                   double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const double* values = A->values();
  for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
    const CompressedRow& row = bs->rows[row_block_counter];
    // rhs += F'b. All cells in these rows are f_blocks (c starts at 0).
    for (int c = 0; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
          values + row.cells[c].position, row.block.size, block_size,
          b + row.block.position,
          rhs + lhs_row_layout_[block]);
    }
    NoEBlockRowOuterProduct(A, row_block_counter, lhs);
  }
}
// A row r of A, which has no e_blocks gets added to the Schur
// Complement as S += r r'. This function is responsible for computing
// the contribution of a single row r to the Schur complement. It is
// very similar in structure to EBlockRowOuterProduct except for
// one difference. It does not use any of the template
// parameters. This is because the algorithm used for detecting the
// static structure of the matrix A only pays attention to rows with
// e_blocks. This is because rows without e_blocks are rare and
// typically arise from regularization terms in the original
// optimization problem, and have a very different structure than the
// rows with e_blocks. Including them in the static structure
// detection will lead to most template parameters being set to
// dynamic. Since the number of rows without e_blocks is small, the
// lack of templating is not an issue.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
                        int row_block_index,
                        BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  for (int i = 0; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);
    const int block1_size = bs->cols[row.cells[i].block_id].size;
    // Diagonal block: S(block1, block1) += b1' b1.
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // This multiply currently ignores the fact that this is a
      // symmetric outer product.
      MatrixTransposeMatrixMultiply
          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }
    // Off-diagonal blocks: S(block1, block2) += b1' b2, upper triangle only.
    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[row.cells[j].block_id].size;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}
// For a row with an e_block, compute the contribution S += F'F. This
// function has the same structure as NoEBlockRowOuterProduct, except
// that this function uses the template parameters.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
EBlockRowOuterProduct(const BlockSparseMatrix* A,
                      int row_block_index,
                      BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  // Cell 0 is the e_block being eliminated, so iteration starts at 1.
  for (int i = 1; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);
    const int block1_size = bs->cols[row.cells[i].block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // block += b1.transpose() * b1;
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }
    // Off-diagonal contributions, upper triangle only (j > i).
    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      const int block2_size = bs->cols[row.cells[j].block_id].size;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        // block += b1.transpose() * b2;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}
// Variant of Eliminate() that avoids forming and inverting E'E
// explicitly. Instead it computes the thin Q factor of each chunk's
// E block and uses E(E'E)^{-1}E' = (EQ)(EQ)'. Only OpenMP
// parallelism is supported here (no TBB/C++11-thread branch).
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
EliminateUsingBlockQR(const BlockSparseMatrix* A,
                      const double* b,
                      const double* D,
                      BlockRandomAccessMatrix* lhs,
                      double* rhs) {
  if (lhs->num_rows() > 0) {
    lhs->SetZero();
    VectorRef(rhs, lhs->num_rows()).setZero();
  }
  const CompressedRowBlockStructure* bs = A->block_structure();
  const int num_col_blocks = bs->cols.size();
  // Add the diagonal to the schur complement.
  if (D != NULL) {
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
    for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
      const int block_id = i - num_eliminate_blocks_;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block_id, block_id,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block_size = bs->cols[i].size;
        typename EigenTypes<Eigen::Dynamic>::ConstVectorRef
            diag(D + bs->cols[i].position, block_size);
        CeresMutexLock l(&cell_info->m);
        MatrixRef m(cell_info->values, row_stride, col_stride);
        m.block(r, c, block_size, block_size).diagonal()
            += diag.array().square().matrix();
      }
    }
  }
  // Eliminate y blocks one chunk at a time. For each chunk, compute
  // the entries of the normal equations and the gradient vector block
  // corresponding to the y block and then apply Gaussian elimination
  // to them. The matrix ete stores the normal matrix corresponding to
  // the block being eliminated and array buffer_ contains the
  // non-zero blocks in the row corresponding to this y block in the
  // normal equations. This computation is done in
  // ChunkDiagonalBlockAndGradient. UpdateRhs then applies gaussian
  // elimination to the rhs of the normal equations, updating the rhs
  // of the reduced linear system by modifying rhs blocks for all the
  // z blocks that share a row block/residual term with the y
  // block. EliminateRowOuterProduct does the corresponding operation
  // for the lhs of the reduced linear system.
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
  for (int i = 0; i < chunks_.size(); ++i) {
#ifdef CERES_USE_OPENMP
    int thread_id = omp_get_thread_num();
#else
    int thread_id = 0;
#endif
    double* buffer = buffer_.get() + thread_id * buffer_size_;
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_col_size = bs->cols[e_block_id].size;
    // NOTE(review): block_row_size_ is filled once per chunk (in the
    // chunk-detection loop), yet is indexed here by e_block_id. This
    // is only correct if chunk i's e_block_id equals i, i.e. every
    // e_block heads exactly one chunk -- TODO confirm this invariant.
    const int e_block_row_size = block_row_size_[e_block_id];
    VectorRef(buffer, buffer_size_).setZero();
    // We are going to be computing
    //
    //   S += F'F - F'E(E'E)^{-1}E'F
    //
    // for each Chunk. The computation is broken down into a number of
    // function calls as below.
    // Compute the Q-factor of E.
    const double * values = A->values();
    Eigen::HouseholderQR<Matrix> qr(e_block_row_size, e_block_col_size);
    qr.compute(
        ConstMatrixRef(
            values + bs->rows[chunk.start].cells.front().position,
            e_block_row_size, e_block_col_size));
    // Thin Q: eq is e_block_row_size x e_block_col_size.
    Matrix eq = qr.householderQ() * Matrix::Identity(e_block_row_size,
                                                     e_block_col_size);
    // Compute the outer product of the e_blocks with themselves (ete
    // = E'E). Compute the product of the e_blocks with the
    // corresponding f_blocks (buffer = E'F), the gradient of the terms
    // in this chunk (g) and add the outer product of the f_blocks to
    // Schur complement (S += F'F).
    ChunkDiagonalBlockUsingBlockQR(
        chunk, A, b, chunk.start, eq, buffer, lhs);
    // For the current chunk compute and update the rhs of the reduced
    // linear system.
    //
    //   rhs = F'b - F'E(E'E)^(-1) E'b
    // jhh37 : using E(E'E)^(-1)g = EQ * (EQ' * b)
    FixedArray<double, 8> eqt_b(e_block_col_size);
    FixedArray<double, 8> eqeqt_b(e_block_row_size);
    int b_pos = bs->rows[chunk.start].block.position;
    MatrixTransposeVectorMultiply<Eigen::Dynamic, kEBlockSize, 0>(
        eq.data(),
        e_block_row_size,
        e_block_col_size,
        b + b_pos,
        eqt_b.get());
    MatrixVectorMultiply<Eigen::Dynamic, kEBlockSize, 0>(
        eq.data(),
        e_block_row_size,
        e_block_col_size,
        eqt_b.get(),
        eqeqt_b.get());
    UpdateRhsUsingBlockQR(chunk, A, b, chunk.start, eqeqt_b.get(), rhs);
    // S -= F'E(E'E)^{-1}E'F
    ChunkOuterProductUsingBlockQR(bs, eq, buffer, chunk.buffer_layout, lhs);
  }
  // For rows with no e_blocks, the schur complement update reduces to
  // S += F'F.
  NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
}
// Update the rhs of the reduced linear system. Compute
//
//   rhs += F'b - F' (Eq Eq') b
//
// eqeqt_b holds the precomputed vector (Eq Eq') b for the chunk,
// laid out row-by-row in the same order as the chunk's rows.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhsUsingBlockQR(const Chunk& chunk,
                      const BlockSparseMatrix* A,
                      const double* b,
                      int row_block_counter,
                      const double * eqeqt_b,
                      double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  int b_pos = bs->rows[row_block_counter].block.position;
  int eqeqt_b_pos = 0;
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];
    // sj = b_j - (Eq Eq' b)_j
    typename EigenTypes<kRowBlockSize>::Vector sj =
        typename EigenTypes<kRowBlockSize>::ConstVectorRef(
            b + b_pos,
            row.block.size)
        - typename EigenTypes<kRowBlockSize>::ConstVectorRef(
            eqeqt_b + eqeqt_b_pos,
            row.block.size);
    // rhs += F_j' sj, each rhs block guarded by its own lock.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      CeresMutexLock l(rhs_locks_[block]);
      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
          values + row.cells[c].position,
          row.block.size, block_size,
          sj.data(), rhs + lhs_row_layout_[block]);
    }
    b_pos += row.block.size;
    eqeqt_b_pos += row.block.size;
  }
}
// Given a Chunk - set of rows with the same e_block, e.g. in the
// following Chunk with two rows.
//
//                E                   F
//      [ y11   0   0   0 |  z11     0    0   0 z51]
//      [ y12   0   0   0 |  z12   z22    0   0   0]
//
// this function computes two matrices. The diagonal block matrix
//
//   ete = y11 * y11' + y12 * y12'
//
// and the off diagonal blocks in the Gauss Newton Hessian.
//
//   buffer = [y11'(z11 + z12), y12' * z22, y11' * z51]
//
// which are zero compressed versions of the block sparse matrices E'E
// and E'F.
//
// and the gradient of the e_block, E'b.
//
// QR variant: eq is the thin Q factor of the chunk's E block, so the
// buffer stores EQ'F instead of E'F.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockUsingBlockQR(
    const Chunk& chunk,
    const BlockSparseMatrix* A,
    const double* b,
    int row_block_counter,
    const Matrix& eq,
    double* buffer,
    BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  int b_pos = bs->rows[row_block_counter].block.position;
  int eq_pos = 0;
  const int e_block_size = eq.cols();
  // Iterate over the rows in this chunk, for each row, compute the
  // contribution of its F blocks to the Schur complement, the
  // contribution of its E block to the matrix E'E (ete), and the
  // corresponding block in the gradient vector.
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];
    // Rows with f_blocks contribute F'F to the Schur complement.
    if (row.cells.size() > 1) {
      EBlockRowOuterProduct(A, row_block_counter + j, lhs);
    }
    // buffer = EQ'F. This computation is done by iterating over the
    // f_blocks for each row in the chunk.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int f_block_id = row.cells[c].block_id;
      const int f_block_size = bs->cols[f_block_id].size;
      double* buffer_ptr =
          buffer + FindOrDie(chunk.buffer_layout, f_block_id);
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
              eq.data() + eq_pos, row.block.size, e_block_size,
              values + row.cells[c].position, row.block.size, f_block_size,
              buffer_ptr, 0, 0, e_block_size, f_block_size);
    }
    b_pos += row.block.size;
    // Advance past this row's slice of the (row-blocked) eq matrix.
    eq_pos += row.block.size * e_block_size;
  }
}
// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
// Schur complement matrix, i.e
//
//   S -= F'E(E'E)^{-1}E'F.
//
// QR variant: the buffer already holds EQ'F, so the update is simply
// S(i,j) -= (EQ'F_i)'(EQ'F_j) with no explicit inverse.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkOuterProductUsingBlockQR(const CompressedRowBlockStructure* bs,
                              const Matrix& eq,
                              const double* buffer,
                              const BufferLayoutType& buffer_layout,
                              BlockRandomAccessMatrix* lhs) {
  // This is the most computationally expensive part of this
  // code. Profiling experiments reveal that the bottleneck is not the
  // computation of the right-hand matrix product, but memory
  // references to the left hand side.
  const int e_block_size = eq.cols();
  BufferLayoutType::const_iterator it1 = buffer_layout.begin();
  // S(i,j) -= (bi' * eq) (eq' * b_j)
  for (; it1 != buffer_layout.end(); ++it1) {
    const int block1 = it1->first - num_eliminate_blocks_;
    const int block1_size = bs->cols[it1->first].size;
    // Only the upper triangular part of S is updated (it2 starts at it1).
    BufferLayoutType::const_iterator it2 = it1;
    for (; it2 != buffer_layout.end(); ++it2) {
      const int block2 = it2->first - num_eliminate_blocks_;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[it2->first].size;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <kEBlockSize, kFBlockSize, kEBlockSize, kFBlockSize, -1>(
                buffer + it1->second, e_block_size, block1_size,
                buffer + it2->second, e_block_size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
|
main.c | // By izanbf1803 - http://izanbf.es/
#define TITLE "Conway's game of life - izanbf.es"
#include <stdio.h>
#include <string.h>
#include <SDL2/SDL.h>
#include "screen.h"
void update(screen_t* screen)
{
    /* Advance the Game of Life by one generation: compute the next state
     * into pixels_next, then publish it back into pixels (which draw()
     * reads). Cells outside the grid count as dead. */
    #pragma omp parallel for
    for (int cx = 0; cx < screen->W; cx++) {
        for (int cy = 0; cy < screen->H; cy++) {
            int neighbours = 0;
            /* Scan the 3x3 neighbourhood, skipping the cell itself. */
            for (int dx = -1; dx <= 1; dx++) {
                for (int dy = -1; dy <= 1; dy++) {
                    if (dx == 0 && dy == 0)
                        continue;
                    int nx = cx + dx;
                    int ny = cy + dy;
                    if (nx >= 0 && nx < screen->W
                        && ny >= 0 && ny < screen->H
                        && screen->pixels[nx][ny] == 1)
                    {
                        neighbours++;
                    }
                }
            }
            /* B3/S23 rule: a live cell survives with 2 or 3 neighbours,
             * a dead cell is born with exactly 3. */
            if (screen->pixels[cx][cy])
                screen->pixels_next[cx][cy] = (neighbours == 2) || (neighbours == 3);
            else
                screen->pixels_next[cx][cy] = (neighbours == 3);
        }
    }
    /* Copy temp data to real pointer (used on draw()). */
    #pragma omp parallel for
    for (int cx = 0; cx < screen->W; cx++) {
        memcpy(screen->pixels[cx], screen->pixels_next[cx], screen->H * sizeof(unsigned char));
    }
}
void draw(screen_t* screen, SDL_Renderer* renderer)
{
    /* Render every live cell as a filled point_size x point_size square,
     * using the renderer's current draw color. */
    const int ps = screen->point_size;
    for (int col = 0; col < screen->W; col++) {
        for (int row = 0; row < screen->H; row++) {
            if (!screen->pixels[col][row])
                continue;
            SDL_Rect cell;
            cell.x = col * ps;
            cell.y = row * ps;
            cell.w = ps;
            cell.h = ps;
            SDL_RenderFillRect(renderer, &cell);
        }
    }
}
int main(int argc, char** argv) // Initialize all values and run the main loop
{
    SDL_Init(SDL_INIT_VIDEO);
    screen_t* screen = init_game(argc, argv);
    /* Center the window on the display. */
    SDL_Window* win = SDL_CreateWindow(TITLE, (screen->info.w >> 1)-(screen->W >> 1), (screen->info.h >> 1)-(screen->H >> 1),
        screen->W * screen->point_size, screen->H * screen->point_size, SDL_WINDOW_SHOWN);
    SDL_Renderer* renderer = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED);
    SDL_Event ev;
    /* Missing size arguments (or "." placeholders) mean fullscreen. */
    if (argc < 3 || (argv[1][0] == '.' && argv[2][0] == '.'))
        SDL_SetWindowFullscreen(win, SDL_WINDOW_FULLSCREEN_DESKTOP);
    unsigned char quit = 0, pause = 0, mouseDownLeft = 0, mouseDownRight = 0;
    while (!quit) {
        while (SDL_PollEvent(&ev)) {
            switch (ev.type) {
                case SDL_QUIT:
                    quit = 1;
                    break;
                case SDL_KEYDOWN:
                    switch (ev.key.keysym.sym) {
                        case SDLK_ESCAPE:
                        case SDLK_q:
                            quit = 1;
                            break;
                        case SDLK_p: /* toggle pause */
                            pause = !pause;
                            break;
                        case SDLK_DOWN: /* slow down, capped at 500 ms per step */
                            screen->delay <<= 1;
                            if (screen->delay > 500) screen->delay = 500;
                            break;
                        case SDLK_UP: /* speed up, floored at 1 ms per step */
                            screen->delay >>= 1;
                            if (screen->delay < 1) screen->delay = 1;
                            break;
                    }
                    break;
                case SDL_MOUSEBUTTONDOWN:
                    if (ev.button.button == SDL_BUTTON_LEFT) mouseDownLeft = 1;
                    if (ev.button.button == SDL_BUTTON_RIGHT) mouseDownRight = 1;
                    break;
                case SDL_MOUSEBUTTONUP:
                    if (ev.button.button == SDL_BUTTON_LEFT) mouseDownLeft = 0;
                    if (ev.button.button == SDL_BUTTON_RIGHT) mouseDownRight = 0;
                    break;
                case SDL_MOUSEMOTION: {
                    /* Paint cells while dragging: left button sets, right
                     * button clears.
                     * BUG FIX: motion events must be read through ev.motion
                     * (SDL_MouseMotionEvent), not ev.button — the old code
                     * only worked because the two union members happen to
                     * place x/y at the same offsets. */
                    int x_ = ev.motion.x / screen->point_size;
                    int y_ = ev.motion.y / screen->point_size;
                    if (x_ >= 0 && x_ < screen->W && y_ >= 0 && y_ < screen->H) {
                        if (mouseDownLeft) {
                            screen->pixels[x_][y_] = 1;
                        }
                        else if (mouseDownRight) {
                            screen->pixels[x_][y_] = 0;
                        }
                    }
                    break;
                }
            }
        }
        SDL_RenderClear(renderer);
        SDL_SetRenderDrawColor(renderer, 255, 255, 255, 1); // Set color = white
        if (!pause) update(screen);
        draw(screen, renderer);
        SDL_SetRenderDrawColor(renderer, 0, 0, 0, 1); // Reset color to black
        SDL_RenderPresent(renderer);
        /* Keep polling responsive while paused; otherwise honour the
         * user-adjustable simulation delay. */
        if (pause) {
            SDL_Delay(1);
        }
        else {
            SDL_Delay(screen->delay);
        }
    }
    end_game(screen); // Free all screen allocations
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}
ch-placement-benchmark.c | /*
* Copyright (C) 2013 University of Chicago.
* See COPYRIGHT notice in top-level directory.
*
*/
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <limits.h>
#include <sys/time.h>
#include "ch-placement-oid-gen.h"
#include "ch-placement.h"
#ifdef CH_ENABLE_CRUSH
#include "ch-placement-crush.h"
#endif
#include "comb.h"
struct options
{
unsigned int num_servers;
unsigned int num_objs;
unsigned int replication;
char* placement;
unsigned int virt_factor;
char* comb_name;
};
struct comb_stats {
unsigned long count;
unsigned long bytes;
};
static int comb_cmp (const void *a, const void *b);
static int usage (char *exename);
static struct options *parse_args(int argc, char *argv[]);
/* Wall-clock time in seconds since the epoch, with microsecond
 * resolution, for benchmark timing. */
static double Wtime(void)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec / 1000000;
}
#ifdef CH_ENABLE_CRUSH
#include <hash.h>
/* Build a minimal one-bucket CRUSH map covering num_servers items with
 * equal weight (0x10000 == 1.0), plus a single CHOOSELEAF_FIRSTN rule.
 * On success returns 0 and fills *map, *weight (caller-owned array of
 * per-server weights) and *n_weight; returns -1 on allocation failure. */
static int setup_crush(struct options *ig_opts,
struct crush_map **map, __u32 **weight, int *n_weight)
{
struct crush_bucket* bucket;
int i;
int *items;
int *weights;
int ret;
int id;
struct crush_rule* rule;
*n_weight = ig_opts->num_servers;
*weight = malloc(sizeof(**weight)*ig_opts->num_servers);
weights = malloc(sizeof(*weights)*ig_opts->num_servers);
items = malloc(sizeof(*items) * ig_opts->num_servers);
/* NOTE(review): `!map` tests the pointer-to-pointer argument, not the
 * allocation; also weights/items are never freed here -- confirm whether
 * crush_make_bucket takes ownership before treating that as a leak. */
if(!(*weight) || !weights || !items || !map)
{
return(-1);
}
/* item ids are 0..n-1; every server gets the same unit weight */
for(i=0; i< ig_opts->num_servers; i++)
{
items[i] = i;
weights[i] = 0x10000;
(*weight)[i] = 0x10000;
}
*map = crush_create();
assert(*map);
/* bucket type depends on the requested algorithm variant */
if(strcmp(ig_opts->placement, "crush-vring") == 0)
#ifdef CH_ENABLE_CRUSH_VRING
bucket = crush_make_bucket(*map, CRUSH_BUCKET_VRING, CRUSH_HASH_DEFAULT, 1,
ig_opts->num_servers, items, weights);
#else
assert(0);
#endif
else
bucket = crush_make_bucket(*map, CRUSH_BUCKET_STRAW, CRUSH_HASH_DEFAULT, 1,
ig_opts->num_servers, items, weights);
assert(bucket);
ret = crush_add_bucket(*map, -2, bucket, &id);
assert(ret == 0);
crush_finalize(*map);
/* rule: take the bucket, choose up to 8 leaves first-n, emit */
rule = crush_make_rule(3, 0, 1, 1, 10);
assert(rule);
crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, id, 0);
crush_rule_set_step(rule, 1, CRUSH_RULE_CHOOSELEAF_FIRSTN, 8, 0);
crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0);
ret = crush_add_rule(*map, rule, 0);
assert(ret == 0);
return(0);
}
#endif
/* Benchmark driver: build a placement instance (hash-ring family or
 * CRUSH), generate a random object population, then time
 * ch_placement_find_closest() across all objects. With -c, also tally
 * how often each distinct replica combination is used and write the
 * sorted non-zero tallies to the output file.
 * Returns 0 on success, -1 on setup failure. */
int main(
    int argc,
    char **argv)
{
    struct options *ig_opts = NULL;
    unsigned long total_byte_count = 0;
    unsigned long total_obj_count = 0;
    struct obj* total_objs = NULL;
    unsigned int i;
    double t1, t2;
    struct ch_placement_instance *instance = NULL;
    int fd = -1;
    struct comb_stats *cs = NULL;
    uint64_t num_combs = 0;
    int ret;
#ifdef CH_ENABLE_CRUSH
    struct crush_map *map;
    __u32 *weight;
    int n_weight;
#endif

    ig_opts = parse_args(argc, argv);
    if(!ig_opts)
    {
        usage(argv[0]);
        return(-1);
    }

    if (ig_opts->comb_name){
        /* set up state to count replica combinations and store results */
        fd = open(ig_opts->comb_name, O_WRONLY|O_CREAT|O_EXCL,
            S_IRUSR|S_IWUSR|S_IRGRP);
        if(fd < 0) {
            perror("open");
            return(-1);
        }
        num_combs = choose(ig_opts->num_servers, ig_opts->replication);
        cs = calloc(num_combs, sizeof(*cs));
        assert(cs);
        printf("# Total possible combinations for %u servers and %u replication: %lu\n",
            ig_opts->num_servers, ig_opts->replication, num_combs);
        printf("# Combo state metrics consuming %lu MiB of memory\n", (num_combs*sizeof(*cs))/(1024*1024));
    }

    if(strcmp(ig_opts->placement, "crush") == 0 ||
        strcmp(ig_opts->placement, "crush-vring") == 0)
    {
#ifdef CH_ENABLE_CRUSH
        ret = setup_crush(ig_opts, &map, &weight, &n_weight);
        if(ret < 0)
        {
            fprintf(stderr, "Error: failed to set up CRUSH.\n");
            return(-1);
        }
        instance = ch_placement_initialize_crush(map, weight, n_weight);
#else
        fprintf(stderr, "Error: not compiled with CRUSH support.\n");
        /* BUG FIX: previously fell through and used an uninitialized
         * placement instance. */
        return(-1);
#endif
    }
    else
    {
        instance = ch_placement_initialize(ig_opts->placement,
            ig_opts->num_servers,
            ig_opts->virt_factor,
            0);
    }

    /* generate random set of objects for testing */
    printf("# Generating random object IDs...\n");
    oid_gen("random", instance, ig_opts->num_objs, ULONG_MAX,
        8675309, ig_opts->replication, ig_opts->num_servers,
        NULL,
        &total_byte_count, &total_obj_count, &total_objs);
    printf("# Done.\n");
    printf("# Object population consuming approximately %lu MiB of memory.\n", (ig_opts->num_objs*sizeof(*total_objs))/(1024*1024));
    assert(total_obj_count == ig_opts->num_objs);

    sleep(1);

    printf("# Calculating placement for each object ID...\n");
    /* run placement benchmark */
    t1 = Wtime();
#pragma omp parallel for
    for(i=0; i<ig_opts->num_objs; i++)
    {
        ch_placement_find_closest(instance, total_objs[i].oid, ig_opts->replication, total_objs[i].server_idxs);
        /* compute the index corresponding to this combination of servers */
        if (ig_opts->comb_name){
            /* BUG FIX: comb_tmp must be per-iteration; it used to be a
             * single function-scope array shared (raced on) by every
             * OpenMP thread. */
            unsigned long comb_tmp[CH_MAX_REPLICATION];
            uint64_t idx;
            memcpy(comb_tmp, total_objs[i].server_idxs,
                ig_opts->replication*sizeof(*comb_tmp));
            rev_ins_sort(ig_opts->replication, comb_tmp);
            idx = comb_index(ig_opts->replication, comb_tmp);
            /* BUG FIX: cs[] is shared across threads; update atomically */
#pragma omp atomic
            cs[idx].count++;
#pragma omp atomic
            cs[idx].bytes += total_objs[i].size;
        }
    }
    t2 = Wtime();
    printf("# Done.\n");

    if(!ig_opts->comb_name)
    {
        printf("# <objects>\t<replication>\t<servers>\t<virt_factor>\t<algorithm>\t<time (s)>\t<rate oids/s>\n");
        printf("%u\t%d\t%u\t%u\t%s\t%f\t%f\n",
            ig_opts->num_objs,
            ig_opts->replication,
            ig_opts->num_servers,
            ig_opts->virt_factor,
            ig_opts->placement,
            t2-t1,
            (double)ig_opts->num_objs/(t2-t1));
    }
    else
    {
        printf("# NOTE: computational performance not shown.\n");
        printf("# Calculating combinations and outputing to %s.\n", ig_opts->comb_name);
    }

    /* we don't need the global list any more */
    free(total_objs);
    total_obj_count = 0;
    total_byte_count = 0;

    /* print out the counts of used combinations */
    if (ig_opts->comb_name){
        int sz = 1<<20;
        char *buf = malloc(sz);
        int written = 0;
        uint64_t total = 0;
        uint64_t num_zeros;

        assert(buf);
        printf("Sorting/writing server combinations\n");
        qsort(cs, num_combs, sizeof(*cs), comb_cmp);
        /* find the number of 0 entries - we aren't printing them.
         * BUG FIX: the bounds check must run before the array read, or
         * an all-zero table walks one element past the end of cs[]. */
        for (num_zeros = 0;
            num_zeros < num_combs && cs[num_zeros].count == 0;
            num_zeros++);
        /* print the header - the number of possible combinations and the
         * number of non-zero entries */
        written = snprintf(buf+written, sz, "%lu %lu\n", num_combs,
            num_combs-num_zeros);
        assert(written < sz);
        /* start the counter where we left off */
        total = num_zeros;
        while (total < num_combs){
            int w = snprintf(buf+written, sz-written, "%lu %lu\n",
                cs[total].count, cs[total].bytes);
            if (w >= sz-written){
                /* buffer full: flush and retry this entry */
                ret = write(fd, buf, written);
                assert(ret == written);
                written=0;
            }
            else{
                written += w;
                total++;
            }
        }
        if (written > 0){
            ret = write(fd, buf, written);
            assert(ret == written);
        }
        close(fd);
        free(buf);
        free(cs);
    }

    return(0);
}
/* Print the option summary to stderr and terminate with exit status 1.
 * Never returns (the int return type exists only for historical callers). */
static int usage (char *exename)
{
    static const char *const option_lines[] = {
        "    -s <number of servers>\n",
        "    -o <number of objects>\n",
        "    -r <replication factor>\n",
        "    -p <placement algorithm>\n",
        "    -v <virtual nodes per physical node>\n",
        "    -c <output file for combinatorial statistics>\n",
    };
    size_t i;

    fprintf(stderr, "Usage: %s [options]\n", exename);
    for (i = 0; i < sizeof(option_lines)/sizeof(option_lines[0]); i++)
        fputs(option_lines[i], stderr);
    exit(1);
}
/* Parse command-line options into a freshly allocated struct options.
 * Returns the populated options on success, or NULL (after releasing any
 * partially-built state) when an argument is malformed, missing, or fails
 * the sanity checks below. Exits via usage() for -h or unknown options. */
static struct options *parse_args(int argc, char *argv[])
{
    struct options *opts = NULL;
    int ret = -1;
    int one_opt = 0;

    opts = (struct options*)malloc(sizeof(*opts));
    if(!opts)
        return(NULL);
    memset(opts, 0, sizeof(*opts));

    while((one_opt = getopt(argc, argv, "s:o:r:hp:v:c:")) != EOF)
    {
        switch(one_opt)
        {
            case 's':
                ret = sscanf(optarg, "%u", &opts->num_servers);
                if(ret != 1)
                    goto err;
                break;
            case 'o':
                ret = sscanf(optarg, "%u", &opts->num_objs);
                if(ret != 1)
                    goto err;
                break;
            case 'v':
                ret = sscanf(optarg, "%u", &opts->virt_factor);
                if(ret != 1)
                    goto err;
                break;
            case 'r':
                ret = sscanf(optarg, "%u", &opts->replication);
                if(ret != 1)
                    goto err;
                break;
            case 'p':
                opts->placement = strdup(optarg);
                if(!opts->placement)
                    goto err;
                break;
            case 'c':
                opts->comb_name = strdup(optarg);
                if(!opts->comb_name)
                    goto err;
                break;
            case 'h': /* BUG FIX: -h was in the optstring but silently ignored */
            case '?':
                usage(argv[0]);
                exit(1);
        }
    }

    /* sanity checks: need >= 2 replicas, enough servers to hold them plus
     * a spare, at least one object, and a valid virtual-node factor */
    if(opts->replication < 2)
        goto err;
    if(opts->num_servers < (opts->replication+1))
        goto err;
    if(opts->num_objs < 1)
        goto err;
    if(opts->virt_factor < 1)
        goto err;
    if(!opts->placement)
        goto err;

    assert(opts->replication <= CH_MAX_REPLICATION);

    return(opts);

err:
    /* BUG FIX: the error paths used to leak opts and its strdup'd fields */
    free(opts->placement);
    free(opts->comb_name);
    free(opts);
    return(NULL);
}
/* qsort comparator: order comb_stats entries by ascending usage count. */
static int comb_cmp (const void *a, const void *b){
    unsigned long count_a = ((struct comb_stats*)a)->count;
    unsigned long count_b = ((struct comb_stats*)b)->count;

    /* yields -1, 0, or 1 without risking overflow from subtraction */
    return (count_a > count_b) - (count_a < count_b);
}
/*
* Local variables:
* c-indent-level: 4
* c-basic-offset: 4
* End:
*
* vim: ft=c ts=8 sts=4 sw=4 expandtab
*/
|
scrypt_fmt.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2013 by Solar Designer
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*/
#include <stdio.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "escrypt/crypto_scrypt.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "memdbg.h"
#define FORMAT_LABEL "scrypt"
#define FORMAT_NAME ""
#define FMT_CISCO9 "$9$"
#define FMT_SCRYPTKDF "$ScryptKDF.pm$"
#ifdef __XOP__
#define ALGORITHM_NAME "Salsa20/8 128/128 XOP"
#elif defined(__AVX__)
#define ALGORITHM_NAME "Salsa20/8 128/128 AVX"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "Salsa20/8 128/128 SSE2"
#else
#define ALGORITHM_NAME "Salsa20/8 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (16384, 8, 1)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 256
#define BINARY_ALIGN 1
#define SALT_SIZE BINARY_SIZE
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test / benchmark vectors: native "$7$" crypt strings plus the two
 * foreign encodings (Cisco type 9, ScryptKDF.pm) that prepare() rewrites
 * into the native form. Each entry pairs a ciphertext with its plaintext. */
static struct fmt_tests tests[] = {
{"$7$C6..../....SodiumChloride$kBGj9fHznVYFQMEn/qDCfrDevf9YDtcDdKvEqHJLV8D", "pleaseletmein"},
{"$7$C6..../....\x01\x09\x0a\x0d\x20\x7f\x80\xff$b7cKqzsQk7txdc9As1WZBHjUPNWQWJW8A.UUUTA5eD1", "\x01\x09\x0a\x0d\x20\x7f\x80\xff"},
{"$7$2/..../....$rNxJWVHNv/mCNcgE/f6/L4zO6Fos5c2uTzhyzoisI62", ""},
{"$7$86....E....NaCl$xffjQo7Bm/.SKRS4B2EuynbOLjAmXU5AbDbRXhoBl64", "password"},
// cisco type 9 hashes. . They are $7$C/..../.... type (N=16384, r=1, p=1) different base-64 (same as WPA). salt used RAW
{"$9$nhEmQVczB7dqsO$X.HsgL6x1il0RxkOSSvyQYwucySCt7qFm4v7pqCxkKM", "cisco"},
{"$9$cvWdfQlRRDKq/U$VFTPha5VHTCbSgSUAo.nPoh50ZiXOw1zmljEjXkaq1g", "123456"},
{"$9$X9fA8mypebLFVj$Klp6X9hxNhkns0kwUIinvLRSIgWOvCwDhVTZqjsycyU", "JtR"},
// 3rd type ScryptKDF.pm format (we saw this in CMIYC 2013)
// Generate in perl with scrypt_hash($_[1],$salt,1<<$N,$r,$p,$bytes)
// to put into proper format, we mime->raw the salt and mime->cryptBS the hash hash, and fixup $N,$r,$p
// For this hash we replace the default ':' chars in the hash with '*' so they will end up as 1
// field, and change the SCRYPT into $ScryptKDF.pm$. So this hash
// SCRYPT:16384:8:1:VHRuaXZOZ05INWJs:JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=
// gets change into (by ScryptKDF2john)
// $ScryptKDF.pm$16384*8*1*VHRuaXZOZ05INWJs*JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=
// and then in prepare, this becomes (which is canonical for this format)
// $7$C6..../....TtnivNgNH5bl$acXnAzE8oVzGwW9Tlu6iw7fq021J/1sZmEKhcLBrT02
{"$ScryptKDF.pm$16384*8*1*bjZkemVmZ3lWVi42*cmBflTPsqGIbg9ZIJRTQdbic8OCUH+904TFmNPBkuEA=","test123"},
{"$ScryptKDF.pm$16384*8*1*VlVYUzBhQmlNbk5J*bJhm6VUS2UQRwMRqLTvSsljDeq193Ge4aqQDtb94bKg=","hello"},
{"$ScryptKDF.pm$16384*8*1*VHRuaXZOZ05INWJs*JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0BhlHpZJ3J2jcozCDM7t+sfjkgQ894R+f+ldVWM5atlkA==","password"},
{NULL}
};
// from crypt_scrypt-common.c (removed static from that file on these 3 functions)
extern const uint8_t * decode64_uint32(uint32_t * dst, uint32_t dstbits, const uint8_t * src);
extern uint8_t * encode64_uint32(uint8_t * dst, size_t dstlen, uint32_t src, uint32_t srcbits);
extern int decode64_one(uint32_t * dst, uint8_t src);
// number of worker threads sized in init(); 1 when built without OpenMP
static int max_threads;
// per-thread escrypt scratch state, one entry per thread
static escrypt_local_t *local;
// the active salt: a full "$7$..." setting string installed by set_salt()
static char saved_salt[SALT_SIZE];
// per-candidate slots: plaintext key in, computed crypt string out
static struct {
char key[PLAINTEXT_LENGTH + 1];
char out[BINARY_SIZE];
} *buffer;
// Format init hook: allocate per-thread scrypt scratch state and the
// candidate key/output buffers. Under OpenMP the advertised
// keys-per-crypt are scaled by the thread count so crypt_all() can run
// one candidate per thread.
static void init(struct fmt_main *self)
{
int i;
#ifdef _OPENMP
max_threads = omp_get_max_threads();
self->params.min_keys_per_crypt *= max_threads;
self->params.max_keys_per_crypt *= max_threads;
#else
max_threads = 1;
#endif
local = mem_alloc(sizeof(*local) * max_threads);
for (i = 0; i < max_threads; i++)
escrypt_init_local(&local[i]);
buffer = mem_alloc(sizeof(*buffer) * self->params.max_keys_per_crypt);
}
/* Map an scrypt cost N (a power of two) to its single crypt(3) base-64
 * digit: itoa64[floor(log2(N))]. */
static char N_to_c(int N) {
    int log2N = 0;
    int v;

    for (v = N >> 1; v != 0; v >>= 1)
        log2N++;
    return itoa64[log2N];
}
/*
 * Canonicalize external scrypt encodings into the native "$7$..." crypt
 * form used by this format.
 *
 * Handles two foreign inputs:
 *  - Cisco type 9 "$9$<14-byte raw salt>$<43-char hash>" with fixed
 *    parameters N=16384, r=1, p=1 (hash re-encoded crypt -> cryptBS);
 *  - ScryptKDF.pm "$ScryptKDF.pm$N*r*p*salt_mime*hash_mime" with variable
 *    parameters (salt mime -> raw, hash mime -> cryptBS).
 * Anything else is returned unchanged. The result lives in a static
 * buffer, so it is only valid until the next call.
 */
static char *prepare(char *fields[10], struct fmt_main *self)
{
    static char Buf[256];
    char tmp[512], tmp2[512], tmp4[256], tmp5[6], tmp6[6], *cp, *cp2;
    int N, r, p;

    if (!strncmp(fields[1], FMT_CISCO9, sizeof(FMT_CISCO9)-1)) {
        // cisco type 9 hashes. scrypt params: N=16384, r=1, p=1 hash in crypt format. Change it to CryptBS.
        // salt is 14 byte RAW, we can use it as is.
        //from: {"$9$nhEmQVczB7dqsO$X.HsgL6x1il0RxkOSSvyQYwucySCt7qFm4v7pqCxkKM", "cisco"},
        //to:   {"$7$C/..../....nhEmQVczB7dqsO$AG.yl8LDCkiErlh4ttizmxYCXSiXYrNY6vKmLDKj/P4", "cisco"},
        if (strlen(fields[1]) != 4+14+43)
            return fields[1];
        N=1<<14; r=1; p=1;
        encode64_uint32((uint8_t*)tmp5, sizeof(tmp5), r, 30);
        tmp5[5]=0;
        encode64_uint32((uint8_t*)tmp6, sizeof(tmp6), p, 30);
        tmp6[5]=0;
        sprintf (Buf, "$7$%c%s%s%14.14s$%s", N_to_c(N), tmp5, tmp6, &(fields[1][3]),
            base64_convert_cp(&(fields[1][3+14+1]), e_b64_crypt, 43, tmp, e_b64_cryptBS, sizeof(tmp), flg_Base64_NO_FLAGS));
    }
    else if (!strncmp(fields[1], FMT_SCRYPTKDF, sizeof(FMT_SCRYPTKDF)-1))
    {
        // ScryptKDF.pm (perl) format scrypt, generated by: scrypt_hash($_[1],$salt,$N,$r,$p,$bytes); Since N, r, p
        // AND bytes are variable, we have to handle computing all of them. NOTE, we may have to make changes to
        // the crypto_scrypt-common.c to handle the variable number of bytes.
        // to put into proper format, we mime->raw the salt and mime->cryptBS the hash hash, and fixup $N,$r,$p
        //from: {"$ScryptKDF.pm$*16384*8*1*VHRuaXZOZ05INWJs*JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=","password"},
        //to:   {"$7$C6..../....TtnivNgNH5bl$acXnAzE8oVzGwW9Tlu6iw7fq021J/1sZmEKhcLBrT02","password"},
        // (inner shadowing declaration of N, r, p removed; the outer ones are used)
        if (strlen(fields[1]) > sizeof(tmp)+sizeof(FMT_SCRYPTKDF)-1)
            return fields[1];
        strcpy(tmp, &fields[1][sizeof(FMT_SCRYPTKDF)-1]);
        cp = strtokm(tmp, "*");
        if (!cp || !isdec(cp)) return fields[1];
        N = atoi(cp);
        cp = strtokm(NULL, "*");
        if (!cp || !isdec(cp)) return fields[1];
        r = atoi(cp);
        cp = strtokm(NULL, "*");
        if (!cp || !isdec(cp)) return fields[1];
        p = atoi(cp);
        cp = strtokm(NULL, "*");
        if (!cp)
            return fields[1];
        cp2 = strtokm(NULL, "*");
        if (!cp2)
            return fields[1];
        if (base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT) != strlen(cp))
            return fields[1];
        if (base64_valid_length(cp2, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT) != strlen(cp2))
            return fields[1];
        encode64_uint32((uint8_t*)tmp5, sizeof(tmp5), r, 30);
        tmp5[5]=0;
        encode64_uint32((uint8_t*)tmp6, sizeof(tmp6), p, 30);
        tmp6[5]=0;
        memset(tmp4, 0, sizeof(tmp4));
        base64_convert_cp(cp, e_b64_mime, strlen(cp), tmp4, e_b64_raw, sizeof(tmp4), flg_Base64_NO_FLAGS);
        memset(tmp2, 0, sizeof(tmp2));
        base64_convert_cp(cp2, e_b64_mime, strlen(cp2), tmp2, e_b64_cryptBS, sizeof(tmp2),flg_Base64_NO_FLAGS);
        // strip trailing '.' padding from the cryptBS-encoded hash
        if (*tmp2) {
            cp = &tmp2[strlen(tmp2)-1];
            while (cp > tmp2 && *cp == '.') *cp-- = 0;
        }
        // BUG FIX: trim tmp4 using its own length; this previously used
        // strlen(tmp) (the strtok-mutated scratch buffer), which started
        // the scan mid-salt and could truncate salts containing '.'
        if (*tmp4) {
            cp = &tmp4[strlen(tmp4)-1];
            while (cp > tmp4 && *cp == '.') *cp-- = 0;
        }
        sprintf (Buf, "$7$%c%s%s%s$%s", N_to_c(N), tmp5, tmp6, tmp4, tmp2);
    } else
        return fields[1];
    return Buf;
}
/* Format teardown hook: release every per-thread escrypt scratch state,
 * then the state array and the candidate buffers themselves. */
static void done(void)
{
    int t;

    for (t = 0; t < max_threads; ++t)
        escrypt_free_local(&local[t]);
    MEM_FREE(local);
    MEM_FREE(buffer);
}
// Accept only well-formed native "$7$<N><r:5><p:5><salt>$<hash>" strings:
// correct signature, valid crypt base-64 in the parameter region, non-zero
// N/r/p, a bounded setting length, and a cryptBS hash of >= 43 characters.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
int length;
unsigned tmp;
// must begin with the scrypt crypt signature
if (strncmp(ciphertext, "$7$", 3))
return 0;
// N (1 char) then r and p (5 chars each) must all be crypt base-64 digits
for (p = ciphertext + 3; p < ciphertext + (3 + 1 + 5 + 5); p++)
if (atoi64[ARCH_INDEX(*p)] == 0x7F)
return 0;
// the last '$' separates setting+salt from the hash
p = strrchr(ciphertext, '$');
if (!p)
return 0;
// setting+salt must leave room for '$' plus a 43-char hash in BINARY_SIZE
if (p - ciphertext > BINARY_SIZE - (1 + 43))
return 0;
++p;
length = base64_valid_length(p, e_b64_cryptBS, flg_Base64_NO_FLAGS);
// reject zero-valued cost parameters: N ...
decode64_one(&tmp, ciphertext[3]);
if (!tmp)
return 0;
// ... r ...
decode64_uint32(&tmp, 30, (const uint8_t *)&ciphertext[4]);
if (!tmp)
return 0;
// ... and p
decode64_uint32(&tmp, 30, (const uint8_t *)&ciphertext[4+5]);
if (!tmp)
return 0;
// we want the hash to use 32 bytes OR more. 43 base64 bytes is 32 raw bytes
return p[length]==0 && length >= 43;
}
/* The "binary" for this format is simply the ASCII ciphertext itself,
 * copied into a fixed NUL-padded buffer (the padding keeps comparisons
 * over the full BINARY_SIZE deterministic). */
static void *get_binary(char *ciphertext)
{
    static char out[BINARY_SIZE];
    size_t len = strlen(ciphertext);

    if (len > sizeof(out))
        len = sizeof(out);
    memset(out, 0, sizeof(out)); /* NUL padding is required */
    memcpy(out, ciphertext, len);
    return out;
}
/* Extract the salt: the "$7$<params><salt>" prefix of the ciphertext.
 * Everything from the '$' that introduces the hash onward is zeroed, and
 * the buffer is fully NUL-padded so salts compare byte-for-byte. */
static void *get_salt(char *ciphertext)
{
    static char out[SALT_SIZE];
    size_t len = strlen(ciphertext);
    char *sep;

    /* NUL padding is required */
    memset(out, 0, sizeof(out));
    if (len > SALT_SIZE - 1)
        len = SALT_SIZE - 1;
    memcpy(out, ciphertext, len);
    /* drop the hash part: zero from the separator '$' (searched past the
     * fixed "$7$" + N + r/p region) through the end of the string */
    sep = strchr(&out[8], '$');
    if (sep)
        memset(sep, 0, strlen(sep));
    return out;
}
#define H(s, i) \
((int)(unsigned char)(atoi64[ARCH_INDEX((s)[(i)])] ^ (s)[(i) - 1]))
/*
* original Hx() macros simple looked at length-2 (last byte, and last byte -2)
* now we look at bytes 40 and 38 from the hash, so that longer hashes can
* be compared to shorter ones. The last byte may be different, so we
* do NOT use that one. This new method works for any number of bytes in
* the scrypt 32 or more.
#define H0(s) \
int i = strlen(s) - 2; \
return i > 0 ? H((s), i) & 0xF : 0
*/
#define H0(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 0 ? H((s), i) & 0xF : 0
#define H1(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 4)) & 0xFF : 0
#define H2(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 6)) & 0xFFF : 0
#define H3(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 4 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
(H((s), i - 4) << 10)) & 0xFFFF : 0
#define H4(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 6 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
(H((s), i - 4) << 10) ^ (H((s), i - 6) << 15)) & 0xFFFFF : 0
// Bucket-hash functions over the ciphertext "binary". Each Hx macro
// (defined above) supplies its own return statement, hashing characters at
// fixed offsets past the last '$' so strings of different lengths that
// share a 32-byte prefix land in the same bucket.
static int binary_hash_0(void *binary)
{
H0((char *)binary);
}
static int binary_hash_1(void *binary)
{
H1((char *)binary);
}
static int binary_hash_2(void *binary)
{
H2((char *)binary);
}
static int binary_hash_3(void *binary)
{
H3((char *)binary);
}
static int binary_hash_4(void *binary)
{
H4((char *)binary);
}
// Bucket-hash functions over the computed crypt strings; these mirror the
// binary_hash_* functions above so computed results and loaded hashes land
// in the same buckets.
static int get_hash_0(int index)
{
H0(buffer[index].out);
}
static int get_hash_1(int index)
{
H1(buffer[index].out);
}
static int get_hash_2(int index)
{
H2(buffer[index].out);
}
static int get_hash_3(int index)
{
H3(buffer[index].out);
}
static int get_hash_4(int index)
{
H4(buffer[index].out);
}
// Hash a salt string into SALT_HASH_SIZE buckets by mixing its last two
// characters (both raw and through the atoi64 table).
static int salt_hash(void *salt)
{
int i, h;
i = strlen((char *)salt) - 1;
// back off one character when the salt is long enough
if (i > 1) i--;
h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])];
h ^= ((unsigned char *)salt)[i - 1];
h <<= 6;
h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i - 1])];
h ^= ((unsigned char *)salt)[i];
return h & (SALT_HASH_SIZE - 1);
}
// Install the active salt (a NUL-terminated "$7$..." setting string,
// produced by get_salt() so it always fits saved_salt).
static void set_salt(void *salt)
{
strcpy(saved_salt, salt);
}
// Store candidate plaintext #index (truncated to PLAINTEXT_LENGTH).
static void set_key(char *key, int index)
{
strnzcpy(buffer[index].key, key, PLAINTEXT_LENGTH + 1);
}
// Return candidate plaintext #index as stored.
static char *get_key(int index)
{
return buffer[index].key;
}
// Compute scrypt over every queued candidate against the current salt,
// writing full crypt strings into buffer[i].out. A NULL return from
// escrypt_r means its internal allocation failed, which is fatal.
// NOTE(review): each iteration uses local[index] as scratch -- safe only
// while max_keys_per_crypt == max_threads (MAX_KEYS_PER_CRYPT is 1);
// confirm if that ratio ever changes.
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index;
int failed = 0;
#ifdef _OPENMP
#pragma omp parallel for default(none) private(index) shared(count, failed, local, saved_salt, buffer)
#endif
for (index = 0; index < count; index++) {
uint8_t *hash;
hash = escrypt_r(&(local[index]),
(const uint8_t *)(buffer[index].key),
strlen(buffer[index].key),
(const uint8_t *)saved_salt,
(uint8_t *)&(buffer[index].out),
sizeof(buffer[index].out));
// on failure, blank this slot so later comparisons cannot match
if (!hash) {
failed = 1;
buffer[index].out[0] = 0;
}
}
if (failed) {
fprintf(stderr, "scrypt memory allocation failed\n");
error();
}
return count;
}
// Return 1 if any computed crypt string matches the loaded ciphertext.
// Comparison ignores the final two characters so hashes of different
// output lengths can still match on their shared prefix.
static int cmp_all(void *binary, int count)
{
int index;
// binary was created as 32 bytes. It will always be
// <= length of buffer.out. So we use the binary as
// our hash indication length (and avoid looking at last byte)
// NOTE(review): len is actually derived from buffer[0].out, not binary;
// all outputs for one salt share a length, but confirm the comment.
int len = strlen(buffer[0].out)-2;
for (index = 0; index < count; index++)
if (!strncmp((char *)binary, buffer[index].out, len))
return 1;
return 0;
}
// Compare one computed crypt string to the loaded ciphertext, again
// ignoring the last two characters (see cmp_all).
static int cmp_one(void *binary, int index)
{
int len = strlen(buffer[index].out)-2;
return !strncmp((char *)binary, buffer[index].out,len);
}
// cmp_one's prefix comparison is already conclusive for this format.
static int cmp_exact(char *source, int index)
{
return 1;
}
// Report the scrypt N cost decoded from the salt's "$7$" setting string
// (the single base-64 digit after "$7$" is log2(N)); 0 if malformed.
static unsigned int tunable_cost_N(void *salt)
{
const uint8_t * setting;
const uint8_t * src;
uint64_t N;
setting = salt;
if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$')
return 0;
src = setting + 3;
{
uint32_t N_log2;
if (decode64_one(&N_log2, *src))
return 0;
src++;
N = (uint64_t)1 << N_log2;
}
return (unsigned int) N;
}
// Report the scrypt r cost: skip "$7$" and the N digit, then decode the
// 5-character (30-bit) base-64 field; 0 if malformed.
static unsigned int tunable_cost_r(void *salt)
{
const uint8_t * setting;
const uint8_t * src;
uint32_t r;
setting = salt;
if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$')
return 0;
src = setting + 3;
{
uint32_t N_log2;
if (decode64_one(&N_log2, *src))
return 0;
src++;
}
src = decode64_uint32(&r, 30, src);
if (!src)
return 0;
return (unsigned int) r;
}
// Report the scrypt p cost: skip "$7$", the N digit and the r field, then
// decode the next 30-bit base-64 field; 0 if malformed.
static unsigned int tunable_cost_p(void *salt)
{
const uint8_t * setting;
const uint8_t * src;
uint32_t r, p;
setting = salt;
if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$')
return 0;
src = setting + 3;
{
uint32_t N_log2;
if (decode64_one(&N_log2, *src))
return 0;
src++;
}
src = decode64_uint32(&r, 30, src);
if (!src)
return 0;
src = decode64_uint32(&p, 30, src);
if (!src)
return 0;
return (unsigned int) p;
}
// The format descriptor registered with the John the Ripper core: static
// parameters first, then the method table wiring up the functions above.
struct fmt_main fmt_scrypt = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
// names of the tunable costs reported by tunable_cost_N/r/p below
{
"N",
"r",
"p"
},
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
tunable_cost_N,
tunable_cost_r,
tunable_cost_p
},
fmt_default_source,
// bucket-hash functions over loaded binaries (larger sizes unused)
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
NULL,
NULL
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
// bucket-hash functions over computed results (must mirror the above)
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
NULL,
NULL
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
GB_binop__bset_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int64)
// C=scalar+B GB (_bind1st__bset_int64)
// C=scalar+B' GB (_bind1st_tran__bset_int64)
// C=A+scalar GB (_bind2nd__bset_int64)
// C=A'+scalar GB (_bind2nd_tran__bset_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_BITSET (aij, bij, int64_t, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, int64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT64 || GxB_NO_BSET_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B when all three matrices are dense; the loop lives in the
// included template, specialized for the BSET/int64 operator by the GB_*
// macros defined above. Returns GrB_NO_VALUE when this operator is
// compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into dense C using the BSET/int64
// operator; B is pre-sliced into B_ntasks tasks for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into dense
// C using the BSET/int64 operator.
GrB_Info GB (_Cdense_accumb__bset_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block above already returned (generated code)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M) with the BSET/int64 operator;
// the numeric work is in GB_add_template.c, driven by the TaskList
// partition computed by the caller.
GrB_Info GB (_AaddB__bset_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// scratch slicings allocated/freed around the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B (optionally masked) with the BSET/int64
// operator; numeric phase in GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__bset_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. BSET is not commutative and has no flipped form, so the
// template is instantiated twice (GB_FLIPPED 0/1) and selected by flipxy.
GrB_Info GB (_AemultB_02__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; M is pre-sliced into M_ntasks tasks.
GrB_Info GB (_AemultB_03__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated factory kernel for the "bset" operator on int64 producing a
// bitmap-format C; the algorithm comes from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bset_int64)
(
GrB_Matrix C,
const int ewise_method,             // eWise method variant selector (template-defined)
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was disabled when the kernel was generated
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the "bset" binary operator with the scalar x bound
// as the first operand (bind1st), elementwise over the entries of B.
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bset_int64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// reinterpret the untyped arguments with their actual int64 types
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t xscalar = (*((int64_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only process entries flagged as present in Bb (if a bitmap is given)
if (GBB (Bb, k))
{
int64_t bval = GBX (Bx, k, false) ;
Cx [k] = GB_BITSET (xscalar, bval, int64_t, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the "bset" binary operator with the scalar y bound
// as the second operand (bind2nd), elementwise over the entries of A.
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bset_int64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// reinterpret the untyped arguments with their actual int64 types
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t yscalar = (*((int64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only process entries flagged as present in Ab (if a bitmap is given)
if (GBB (Ab, k))
{
int64_t aval = GBX (Ax, k, false) ;
Cx [k] = GB_BITSET (aval, yscalar, int64_t, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below;
// the bound scalar x is in scope where the template expands the macro.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, int64_t, 64) ; \
}
GrB_Info GB (_bind1st_tran__bset_int64)
(
GrB_Matrix C,
const GB_void *x_input,                  // untyped pointer to the bound scalar x
const GrB_Matrix A,
int64_t *restrict *Workspaces,           // presumably per-task scratch space -- see template
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this generated function (here the
// x and a types coincide, so the restored definition is also int64_t)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below;
// the bound scalar y is in scope where the template expands the macro.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, int64_t, 64) ; \
}
GrB_Info GB (_bind2nd_tran__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,                  // untyped pointer to the bound scalar y
int64_t *restrict *Workspaces,           // presumably per-task scratch space -- see template
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sageInterface_modified.h | #ifndef ROSE_SAGE_INTERFACE
#define ROSE_SAGE_INTERFACE
#include "sage3basic.hhh"
#include <stdint.h>
#include <utility>
#include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
#if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference
SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project );
#else
SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project );
#endif
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "rewrite.h"
#endif
// DQ (7/20/2008): Added support for unparsing arbitrary strings in the unparser.
#include "astUnparseAttribute.h"
#include <set>
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "LivenessAnalysis.h"
#include "abstract_handle.h"
#include "ClassHierarchyGraph.h"
#endif
// DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h
//! A global function for getting the string associated with an enum (which is defined in global scope)
ROSE_DLL_API std::string getVariantName (VariantT v);
// DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE
// This namespace is specific to interface functions that operate on the Sage III AST.
// The name was chosen so as not to conflict with other classes within ROSE.
// This will become the future home of many interface functions which operate on
// the AST and which are generally useful to users. As a namespace multiple files can be used
// to represent the complete interface and different developers may contribute interface
// functions easily.
// Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008)
// We could add simpler layers of support for construction of IR nodes by
// hiding many details in "makeSg***()" functions. Such functions would
// return pointers to the associated Sg*** objects and would be able to hide
// many IR specific details, including:
// memory handling
// optional parameter settings not often required
// use of Sg_File_Info objects (and setting them as transformations)
//
// namespace AST_Interface (this name is taken already by some of Qing's work :-)
//! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode()
#define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode()
//------------------------------------------------------------------------
/*! \brief This namespace is to organize functions that are useful when operating on the AST.
\defgroup frontendSageUtilityFunctions SAGE III utility functions(SageInterface)
\ingroup rose_frontend
The Sage III IR design attempts to be minimalist. Thus additional functionality is
intended to be presented using separate higher level interfaces which work with the IR.
The namespace, SageInterface, collects functions that operate on the IR and are supportive of numerous types of routine operations required to support general analysis and transformation of the AST.
\internal Further organization of the functions in this namespace is required.
Major AST manipulation functions are scattered in the following directories
- src/midend/astUtil/astInterface
- src/roseSupport/utility_function.h, namespace ROSE
- src/roseSupport/TransformationSupport.h, class TransformationSupport
- src/midend/astInlining/inlinerSupport.C
- src/frontend/SageIII/sageInterface
- projects: such as outliner, OpenMP_Translator
Some other utility functions not related AST can be found in
- src/util/stringSupport/string_functions.h, namespace StringUtility
- src/roseExtensions/dataStructureTraversal/helpFunctions.C
- projects/dataStructureGraphing/helpFunctions.C
\todo A number of additional things to do:
- Pull scope handling out of EDG/Sage III translation so that it is made
available to anyone else building the Sage III IR from scratch (which
when it gets non-trivial, involves the manipulation of scopes).
- Other stuff ...
*/
namespace SageInterface
{
// DQ (4/3/2014): Added general AST support separate from the AST.
// Container and API for analysis information that is outside of the AST and as a result
// prevents frequent modification of the IR.
class DeclarationSets
{
// DQ (4/3/2014): This stores all associated declarations as a map of sets.
// the key to the map is the first nondefining declaration and the elements of the set are
// all of the associated declarations (including the defining declaration).
private:
//! Map of first-nondefining declaration to all other associated declarations.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap;
public:
//! Record a declaration (keyed, per the map description above, by its
//! first nondefining declaration; definition lives outside this header).
void addDeclaration(SgDeclarationStatement* decl);
//! Return the set of declarations associated with decl (ownership and
//! NULL behavior defined by the implementation -- confirm there).
const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl);
//! Direct (mutable) access to the underlying map.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap();
//! Predicate: presumably true when decl is located in its defining scope
//! -- TODO(review) confirm against the implementation.
bool isLocatedInDefiningScope(SgDeclarationStatement* decl);
};
// DQ (4/3/2014): This constructs a data structure that holds analysis information about
// the AST that is separate from the AST. This is intended to be a general mechanism
// to support analysis information without constantly modifying the IR.
DeclarationSets* buildDeclarationSets(SgNode*);
//! An internal counter for generating unique SgName
ROSE_DLL_API extern int gensym_counter;
// tps : 28 Oct 2008 - support for finding the main interpretation
SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file);
//! Get the unsigned value of a disassembled constant.
uint64_t getAsmConstant(SgAsmValueExpression* e);
//! Get the signed value of a disassembled constant.
int64_t getAsmSignedConstant(SgAsmValueExpression *e);
//! Function to add "C" style comment to statement.
void addMessageStatement( SgStatement* stmt, std::string message );
//! Persistent AST attribute that stores a unique name for an expression.
class UniqueNameAttribute : public AstAttribute
{
private:
std::string name; // the stored unique name
public:
//! Construct with an optional initial name (empty by default).
UniqueNameAttribute (std::string n = "") : name(n) { }
//! Replace the stored name.
void set_name (std::string n) { name = n; }
//! Retrieve the stored name.
std::string get_name () { return name; }
};
//! Hash functor for SgNode pointers: the pointer value itself is the hash.
struct hash_nodeptr
{
// CH (4/9/2010): a boost::hash / hash_value(node) based implementation was
// considered here but is compiled out; the address is used directly.
public:
size_t operator()(SgNode* node) const
{
return reinterpret_cast<size_t>(node);
}
};
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void supplementReplacementSymbolMap ( rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> & inputReplacementMap );
#endif
//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
\brief utility functions for symbol tables
*/
// Liao 1/22/2008, used for get symbols for generating variable reference nodes
// ! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL);
// Liao, 1/24/2008, find exact match for a function
//!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName,
const SgType* t,
SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
// DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support).
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#if 0
// DQ (8/13/2013): This function does not make since any more, now that we have made the symbol
// table handling more precise and we have to provide template parameters for any template lookup.
// We also have to know if we want to lookup template classes, template functions, or template
// member functions (since each have specific requirements).
SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#endif
#if 0
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
// Where these are called we might not know enough information about the template parameters or function
// types, for example.
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif
// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);
// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);
/*! \brief set_name of symbol in symbol table.
This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.
\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);
/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();
// DQ (6/27/2005):
/*! \brief Output the local symbol tables.
\implementation Each symbol table is output with the file infor where it is located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);
//! AST traversal helper (used with outputLocalSymbolTables(), declared just
//! above); the visit() implementation is not in this header.
class OutputLocalSymbolTables:public AstSimpleProcessing
{
public:
//! Callback invoked for each AST node during the traversal.
void visit (SgNode * node);
};
/*! \brief Regenerate the symbol table.
\implementation current symbol table must be NULL pointer before calling this
function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);
/*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted.
*/
void clearUnusedVariableSymbols (SgNode* root = NULL);
// DQ (3/1/2009):
//! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table.
void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Stringify
\brief Generate a useful string (name) to describe a SgNode
*/
/*! \brief Generate a useful name to describe the SgNode
\internal default names are used for SgNode objects that can not be associated with a name.
*/
// DQ (9/21/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgNode * node);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgStatement * stmt);
/*! \brief Generate a useful name to describe the expression
\internal default names are used for expressions that can not be associated with a name.
*/
std::string get_name (const SgExpression * expr);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgDeclarationStatement * declaration);
/*! \brief Generate a useful name to describe the scope
\internal default names are used for scope that cannot be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgScopeStatement * scope);
/*! \brief Generate a useful name to describe the SgSymbol
\internal default names are used for SgSymbol objects that cannot be associated with a name.
*/
// DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support).
std::string get_name (const SgSymbol * symbol);
/*! \brief Generate a useful name to describe the SgType
\internal default names are used for SgType objects that cannot be associated with a name.
*/
std::string get_name (const SgType * type);
/*! \brief Generate a useful name to describe the SgSupport IR node
*/
std::string get_name (const SgSupport * node);
/*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node
*/
std::string get_name (const SgLocatedNodeSupport * node);
/*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node
*/
std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive );
/*! \brief Generate a useful name to describe the SgToken IR node
*/
std::string get_name ( const SgToken* token );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Class utilities
\brief
*/
/*! \brief Get the default destructor from the class declaration
*/
// DQ (6/21/2005): Get the default destructor from the class declaration
SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration *
classDeclaration);
/*! \brief Get the default constructor from the class declaration
*/
// DQ (6/22/2005): Get the default constructor from the class declaration
ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration *
classDeclaration);
/*! \brief Return true if template definition is in the class, false if outside of class.
*/
// DQ (8/27/2005):
bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl
* memberFunctionDeclaration);
/*! \brief Generate a non-defining (forward) declaration from a defining function declaration.
\internal should put into sageBuilder ?
*/
// DQ (9/17/2005):
SgTemplateInstantiationMemberFunctionDecl*
buildForwardFunctionDeclaration
(SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! Check if a SgNode is a declaration for a structure
bool isStructDeclaration(SgNode * node);
//! Check if a SgNode is a declaration for a union
bool isUnionDeclaration(SgNode * node);
#if 0
// DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration
// (so that it can handle template functions and member functions)
/*! \brief Return true if member function of a template member function,
of false if a non-template member function in a templated class.
*/
// DQ (8/27/2005):
bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl *
memberFunctionDeclaration);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Misc.
\brief Not sure the classifications right now
*/
// DQ (2/12/2012): Added some diagnostic support.
//! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened.
void whereAmI(SgNode* node);
//! Extract a SgPragmaDeclaration's leading keyword . For example "#pragma omp parallel" has a keyword of "omp".
std::string extractPragmaKeyword(const SgPragmaDeclaration *);
//! Check if a node is SgOmp*Statement
ROSE_DLL_API bool isOmpStatement(SgNode* );
/*! \brief Return true if function is overloaded.
*/
// DQ (8/27/2005):
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
// DQ (2/14/2012): Added support function used for variable declarations in conditionals.
//! Support function used for variable declarations in conditionals
void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body);
//! Support function used for variable declarations in conditionals
void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body);
//! Support function used for variable declarations in conditionals
void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body);
//! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute")
void annotateExpressionsWithUniqueNames (SgProject* project);
//! Check if a SgNode is a main() function declaration
ROSE_DLL_API bool isMain (const SgNode* node);
// DQ (6/22/2005):
/*! \brief Generate unique name from C and C++ constructs. The name may contain space.
This is support for the AST merge, but is generally useful as a more general mechanism than
name mangling which is more closely ties to the generation of names to support link-time function name
resolution. This is more general than common name mangling in that it resolves more relevant differences
between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;").
\implementation current work does not support expressions.
*/
std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations);
/** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter.
* @param baseName the word to be included in the variable names. */
std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp");
// DQ (8/10/2010): Added const to first parameter.
// DQ (3/10/2007):
//! Generate a unique string from the source file position information
std::string declarationPositionString (const SgDeclarationStatement * declaration);
// DQ (1/20/2007):
//! Added mechanism to generate project name from list of file names
ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false );
//! Given a SgExpression that represents a named function (or bound member
//! function), return the mentioned function
SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func);
//! Get the mask expression from the header of a SgForAllStatement
SgExpression* forallMaskExpression(SgForAllStatement* stmt);
//! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t
void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t);
// DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation).
/*! \brief Support for faster mangled name generation (caching avoids recomputation).
*/
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void clearMangledNameCache (SgGlobal * globalScope);
void resetMangledNameCache (SgGlobal * globalScope);
#endif
std::string getMangledNameFromCache (SgNode * astNode);
std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName);
SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! a better version for SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically
//! Used to have a struct declaration embedded into a variable declaration
void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl);
// DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the
// bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration );
//! Check if a defining declaration comes before or after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);
// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Were this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls are context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);
/*! \brief Compute the intersection set for two ASTs.
This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);
//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);
//! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e);
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree) {
return dynamic_cast<NodeType*>(deepCopyNode(subtree));
}
//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);
//!Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);
// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);
//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);
//! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
ROSE_DLL_API bool isConstantFalse(SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);
//! Check if a declaration has a "static' modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);
//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);
//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);
//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);
//! Interface for creating a statement whose computation writes its answer into
//! a given variable.
class StatementGenerator {
public:
virtual ~StatementGenerator() {};
//! Build a statement that stores its result into where_to_write_answer;
//! pure virtual -- implemented by clients of this interface.
virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};
//! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc)
//!
//! Return the left hand, right hand expressions and if the left hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);
//! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. This function will convert them all to a top level SgInitializedName.
ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current);
//! Build an abstract handle from an AST node, reuse previously built handle when possible
ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*);
//! Obtain a matching SgNode from an abstract handle string
ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string);
//! Dump information about a SgNode for debugging
ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc="");
//! Reorder a list of declaration statements based on their appearance order in source files
ROSE_DLL_API std::vector<SgDeclarationStatement*>
sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec);
// DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names.
//! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc.
// bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp );
bool isPrefixOperator( SgExpression* exp );
//! Check for proper names of possible prefix operators (used in isPrefixOperator()).
bool isPrefixOperatorName( const SgName & functionName );
//! Is an overloaded operator a postfix operator. (e.g. ).
bool isPostfixOperator( SgExpression* exp );
//! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/
// std::string version(); // utility_functions.h, version number
/*! Brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use!
*/
ROSE_DLL_API bool is_C_language ();
ROSE_DLL_API bool is_OpenMP_language ();
ROSE_DLL_API bool is_UPC_language ();
//! Check if dynamic threads compilation is used for UPC programs
ROSE_DLL_API bool is_UPC_dynamic_threads();
ROSE_DLL_API bool is_C99_language ();
ROSE_DLL_API bool is_Cxx_language ();
ROSE_DLL_API bool is_Java_language ();
ROSE_DLL_API bool is_Fortran_language ();
ROSE_DLL_API bool is_CAF_language ();
ROSE_DLL_API bool is_PHP_language();
ROSE_DLL_API bool is_Python_language();
ROSE_DLL_API bool is_Cuda_language();
ROSE_DLL_API bool is_X10_language();
ROSE_DLL_API bool is_binary_executable();
ROSE_DLL_API bool is_mixed_C_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language ();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Scope
\brief
*/
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Assigns unique numbers to each SgScopeStatement of a function.
This is used to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void resetScopeNumbers (SgFunctionDefinition * functionDeclaration);
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Clears the cache of scope,integer pairs for the input function.
This is used to clear the cache of computed unique labels for scopes in a function.
This function should be called after any transformation on a function that might affect
the allocation of scopes and cause the existing unique numbers to be incorrect.
This is part of support to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void clearScopeNumbers (SgFunctionDefinition * functionDefinition);
//!Find the enclosing namespace of a declaration
SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration);
// SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
//!check if node1 is a strict ancestor of node 2. (a node is not considered its own ancestor)
bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Preprocessing Information
\brief #if-#else-#end, comments, #include, etc
*/
//! Dumps a located node's preprocessing information.
void dumpPreprocInfo (SgLocatedNode* locatedNode);
//! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX.
ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL);
//! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon.
ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Move preprocessing information of stmt_src to stmt_dst, Only move preprocessing information from the specified source-relative position to a specified target position, otherwise move all preprocessing information with position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default. Prepending is used if usePreprend is set to true. Optionally, the relative position can be adjust after the moving using dst_position.
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef,
PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation.
ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf);
//!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo()
ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf);
//! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes.
ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target,
const std::string & text,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before);
//!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on.
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Source File Position
\brief set Sg_File_Info for a SgNode
*/
//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);
// DQ (11/25/2009): Added matching support for adding comments to SgAsm nodes.
// Build and attach comment
// void attachComment(SgAsmStatement* target, const std::string & content );
// DQ (7/20/2008): I am not clear were I should put this function, candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );
// ************************************************************************
// Newer versions of now deprecated functions
// ************************************************************************
// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and used the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code position for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);
// A better name might be "setSourcePositionForSubTree"
//! Set the source code position for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root);
// DQ (5/1/2012): New function with improved name (still preserving the previous interface).
// This function is not required once the new mechanism defining a source position mode is complete (shortly).
//! Set subtree as a transformation.
// void setSourcePositionAtRootAndAllChildrenAsTransformation(SgNode *root);
// void setSourcePositionAtRootAndAllChildrenAsDefault(SgNode *root);
// Removed to force use of the API and permit flexability in the lower level implementation.
//! DQ (5/1/2012): New function with improved name.
// void setSourcePositionToDefault( SgLocatedNode* locatedNode );
template<class T> void setSourcePositionToDefault( T* node );
//! DQ (5/1/2012): New function with improved name.
void setSourcePositionAsTransformation(SgNode *node);
// DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatibility).
void setSourcePositionPointersToNull(SgNode *node);
// ************************************************************************
// ************************************************************************
// Older deprecated functions
// ************************************************************************
// Liao, 1/8/2007, set file info. for a whole subtree as transformation generated
//! Set current node's source position as transformation generated
ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node);
//! Set current node's source position as NULL
ROSE_DLL_API void setOneSourcePositionNull(SgNode *node);
//! Recursively set source position info(Sg_File_Info) as transformation generated
ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root);
//! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool
ROSE_DLL_API void setSourcePositionForTransformation_memoryPool();
//! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage.
// ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode);
// ************************************************************************
//@}
//------------------------------------------------------------------------
//@{
/*! @name Data types
\brief
*/
// from src/midend/astInlining/typeTraits.h
// src/midend/astUtil/astInterface/AstInterface.h
//! Get the right bool type according to C or C++ language input
SgType* getBoolType(SgNode* n);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
////!
////! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//!Get the data type of the first initialized name of a declaration statement
ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl);
//! Is a type default constructible? This may not quite work properly.
ROSE_DLL_API bool isDefaultConstructible(SgType* type);
//! Is a type copy constructible? This may not quite work properly.
ROSE_DLL_API bool isCopyConstructible(SgType* type);
//! Is a type assignable? This may not quite work properly.
ROSE_DLL_API bool isAssignable(SgType* type);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Check if a class type is a pure virtual class. True means that there is at least
//! one pure virtual function that has not been overridden.
//! In the case of an incomplete class type (forward declaration), this function returns false.
ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy);
#endif
//! Does a type have a trivial (built-in) destructor?
ROSE_DLL_API bool hasTrivialDestructor(SgType* t);
//! Is this type a non-constant reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isNonconstReference(SgType* t);
//! Is this type a const or non-const reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isReferenceType(SgType* t);
//! Is this type a pointer type? (Handles typedefs correctly)
ROSE_DLL_API bool isPointerType(SgType* t);
//! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to
//! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile,
//! it returns false for (int const * x) and (int const * const x) because these types point to a const int.
//! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns
//! false for const (int * const * x)
ROSE_DLL_API bool isPointerToNonConstType(SgType* type);
//! Is this a const type?
/* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char.
* Similarly, neither for const int b[10]; or const int & c =10;
* The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of
the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type".
*/
ROSE_DLL_API bool isConstType(SgType* t);
//! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers.
SgType* removeConst(SgType* t);
//! Is this a volatile type?
ROSE_DLL_API bool isVolatileType(SgType* t);
//! Is this a restrict type?
ROSE_DLL_API bool isRestrictType(SgType* t);
//! Is this a scalar type?
/*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary
*/
ROSE_DLL_API bool isScalarType(SgType* t);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool.
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//! Check if a type is a struct type (a special SgClassType in ROSE)
ROSE_DLL_API bool isStructType(SgType* t);
//! Generate a mangled string for a given type based on Itanium C++ ABI
ROSE_DLL_API std::string mangleType(SgType* type);
//! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE
ROSE_DLL_API std::string mangleScalarType(SgType* type);
//! Generated mangled modifier types, include const, volatile,according to Itanium C++ ABI, with extension to handle UPC shared types.
ROSE_DLL_API std::string mangleModifierType(SgModifierType* type);
//! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array.
ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t);
//! Get the number of dimensions of an array type
ROSE_DLL_API int getDimensionCount(SgType* t);
//! Get the element type of an array
ROSE_DLL_API SgType* getArrayElementType(SgType* t);
//! Get the element type of an array, pointer or string, or NULL if not applicable
ROSE_DLL_API SgType* getElementType(SgType* t);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// Note, the first entry of the array is a SgNullExpression, iff the
/// first array dimension was not specified.
/// \code
/// int x[] = { 1, 2, 3 };
/// \endcode
/// note, the expression does not have to be a constant
/// \code
/// int x[i*5];
/// \endcode
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \param varref a reference to an array variable (the variable of type arrtype)
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// If the first array dimension was not specified an expression
/// that indicates that size is generated.
/// \code
/// int x[][3] = { 1, 2, 3, 4, 5, 6 };
/// \endcode
/// the entry for the first dimension will be:
/// \code
/// // 3 ... size of 2nd dimension
/// sizeof(x) / (sizeof(int) * 3)
/// \endcode
/// \pre arrtype is the array-type of varref
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
/// \post !isSgNullExpression(return-value[*])
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref);
/// \overload
/// \note see get_C_array_dimensions for SgVarRefExp for details.
/// \todo make initname const
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname);
//! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp.
ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL);
//! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information.
/*!
* Note: we classify private-to-shared as 'has shared' type for convenience here. It is indeed a private type in strict sense.
AST graph for some examples:
- shared scalar: SgModifierType -->base type
- shared array: SgArrayType --> SgModiferType --> base type
- shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt
- shared to private: SgModifierType --> SgPointerType --> base type
- private to shared: SgPointerType --> SgModifierType --> base type
*/
ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL );
//! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property.
/*!
* ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType points to SgArrayType. Also typedef may cause a chain of nodes before reach the actual SgModifierType with UPC shared property.
*/
ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL);
//! Check if a modifier type is a UPC shared type.
ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type);
//! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array.
ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type);
//! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.)
ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type);
//! Get the block size of a UPC shared modifier type
ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type);
//! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays)
ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t);
//! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type.
ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t);
//! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first.
ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t);
//! Is a UPC array with dimension of X*THREADS
ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t);
//! Lookup a named type based on its name, bottomup searching from a specified scope. Note name collision might be allowed for c (not C++) between typedef and enum/struct. Only the first matched named type will be returned in this case. typedef is returned as it is, not the base type it actually refers to.
ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL);
// DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types.
//! Get the type of the associated argument expression from the function type.
ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Loop handling
\brief
*/
// by Jeremiah
//! Add a step statement to the end of a loop body
//! Add a new label to the end of the loop, with the step statement after
//! it; then change all continue statements in the old loop body into
//! jumps to the label
//!
//! For example:
//! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes
//! while (a < 5) {if (a < -3) goto label; label: a++;}
ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step);
ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f);
ROSE_DLL_API void convertForToWhile(SgForStatement* f);
ROSE_DLL_API void convertAllForsToWhiles(SgNode* top);
//! Change continue statements in a given block of code to gotos to a label
ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label);
//!Return the loop index variable for a for loop
ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop);
//!Check if a SgInitializedName is used as a loop index within a AST subtree
//! This function will use a bottom-up traverse starting from the subtree_root to find all enclosing loops and check if ivar is used as an index for either of them.
ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root);
//! Routines to get and set the body of a loop
ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop);
ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body);
//! Routines to get the condition of a loop. It recognize While-loop, For-loop, and Do-While-loop
ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop);
//! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond);
//! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested
//!
//! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. IsInclusiveUpperBound is true when <= or >= is used for loop condition
ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL);
//! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1
ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/);
//! Set the lower bound of a loop header for (i=lb; ...)
ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb);
//! Set the upper bound of a loop header,regardless the condition expression type. for (i=lb; i op up, ...)
ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub);
//! Set the stride(step) of a loop 's incremental expression, regardless the expression types (i+=s; i= i+s, etc)
ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride);
//! Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary
ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop);
//! Normalize a for loop, return true if successful. Generated constants will be fold by default.
//!
//! Translations are :
//! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..)
//! For test expression:
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
//! For increment expression:
//! i++ is normalized to i+=1 and
//! i-- is normalized to i+=-1
//! i-=s is normalized to i+= -s
ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true);
//!Normalize a Fortran Do loop. Make the default increment expression (1) explicit
ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop);
//! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor.
ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor);
//! Interchange/permutate a n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0,depth!).
ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder);
//! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s
ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize);
//Winnie Loop Collapsing
SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Topdown search
\brief Top-down traversal from current node to find a node of a specified type
*/
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
//!
//! Runs NodeQuery::querySubTree() restricted to the requested variant and
//! downcasts every hit to NodeType*. Each cast is asserted to succeed, since
//! the variant filter guarantees the dynamic type.
template <typename NodeType>
std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant)
{
  Rose_STL_Container<SgNode*> matches = NodeQuery::querySubTree(top, variant);
  std::vector<NodeType*> typedNodes;
  typedNodes.reserve(matches.size());
  for (Rose_STL_Container<SgNode*>::const_iterator it = matches.begin();
       it != matches.end(); ++it)
  {
    NodeType* typedNode = dynamic_cast<NodeType*>(*it);
    ROSE_ASSERT (typedNode);
    typedNodes.push_back(typedNode);
  }
  return typedNodes;
}
/*! \brief Returns STL vector of SgFile IR node pointers.
Demonstrates use of restricted traversal over just SgFile IR nodes.
*/
std::vector < SgFile * >generateFileList ();
/** Get the current SgProject IR Node.
*
* The library should never have more than one project and it asserts such. If no project has been created yet then this
* function returns the null pointer. */
ROSE_DLL_API SgProject * getProject();
//! Query memory pools to grab SgNode of a specified type
//! Query memory pools to grab SgNode of a specified type
//!
//! Visits the memory pool representative for NodeType and collects every node
//! that downcasts to NodeType*. The cast is asserted to succeed because the
//! traversal is restricted to NodeType's pool.
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
  // Local traversal class that accumulates all nodes of the requested type.
  class CollectingTraversal : public ROSE_VisitTraversal
  {
    public:
      std::vector<NodeType*> nodes;
      void visit(SgNode* node)
      {
        NodeType* typedNode = dynamic_cast<NodeType*>(node);
        ROSE_ASSERT(typedNode != NULL);
        if (typedNode != NULL)
          nodes.push_back(typedNode);
      }
      virtual ~CollectingTraversal() {}
  };
  CollectingTraversal traversal;
  NodeType::visitRepresentativeNode(traversal);
  return traversal.nodes;
}
/*! \brief top-down traversal from current node to find the main() function declaration
*/
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);
//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another declaration statement
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope);
//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);
//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! loops or switch statements defines their own contexts for break
statements. The function will stop immediately if run on a loop or switch
statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");
//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
function will stop immediately if run on a loop
statement. If fortranLabel is non-empty, continues (CYCLEs) to that label
within nested loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");
std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);
std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);
//! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
//! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
//!
//! When scope is NULL only the symbol name is compared (Liao 2/9/2010: a NULL
//! scope is explicitly allowed). On a match, either the declaration itself or
//! its defining declaration is returned, depending on isDefining; otherwise the
//! search recurses into the node's traversal successors in preorder.
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
  if (root == NULL)
    return 0;
  T* candidate = dynamic_cast<T*>(root);
  if (candidate != NULL)
  {
    // Scope is only checked when one was supplied; the name test is evaluated
    // afterwards, mirroring the original short-circuit order.
    bool scopeOk = (scope == NULL) || (candidate->get_scope() == scope);
    if (scopeOk && (candidate->search_for_symbol_from_symbol_table()->get_name() == name))
    {
      if (isDefining)
      {
        ROSE_ASSERT (candidate->get_definingDeclaration() != NULL);
        return dynamic_cast<T*>(candidate->get_definingDeclaration());
      }
      return candidate;
    }
  }
  // Not found here: recurse into the children in preorder.
  std::vector<SgNode*> successors = root->get_traversalSuccessorContainer();
  for (std::vector<SgNode*>::const_iterator child = successors.begin();
       child != successors.end(); ++child)
  {
    T* match = findDeclarationStatement<T>(*child, name, scope, isDefining);
    if (match)
      return match;
  }
  return 0;
}
//! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);
#if 0 //TODO
// 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
// until reach the end node
SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
// 2. return all nodes of type VariantT following the source node
std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
\brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.
/** Find a node by type using upward traversal.
*
* Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
* ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
* starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
*
* For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
* non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
* declaration is different than the first non-defining declaration.
*
 * If no ancestor of the requisite type or its subtypes is found then this function returns a null pointer.
*
* If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
* be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#if 1
  // DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
  // the newer version (below) is what we want to use I will resolve this conflict by keeping
  // the previous version in place.
  // No node ==> there cannot be an enclosing node of the requested type.
  if (NULL == astNode)
  {
    return NULL;
  }
  // Return astNode itself when requested and it already has the target dynamic type.
  if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
  {
    return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
  }
  // DQ (3/5/2012): Check for reference to self...
  ROSE_ASSERT(astNode->get_parent() != astNode);
  SgNode* parent = astNode->get_parent();
  // DQ (3/5/2012): Check for loops that will cause infinite loops.
  // First pass: walk the raw parent chain only to detect a cycle; the position
  // reached here is discarded (parent is reset below before the real walk).
  // NOTE(review): previouslySeenParent is never advanced inside this loop, so the
  // test below only fires when the walk returns to the *initial* parent — longer
  // cycles that do not pass through it would not be detected here; confirm intended.
  SgNode* previouslySeenParent = parent;
  bool foundCycle = false;
  while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
  {
    ROSE_ASSERT(parent->get_parent() != parent);
#if 0
    printf ("In getEnclosingNode(): parent = %p = %s \n",parent,parent->class_name().c_str());
#endif
    parent = parent->get_parent();
    // DQ (3/5/2012): Check for loops that will cause infinite loops.
    // ROSE_ASSERT(parent != previouslySeenParent);
    if (parent == previouslySeenParent)
    {
      foundCycle = true;
    }
  }
#if 0
  printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
  // Restart from the original parent: the loop above was only used for cycle detection.
  parent = previouslySeenParent;
  SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
  if (declarationStatement != NULL)
  {
#if 0
    printf ("Found a SgDeclarationStatement \n");
#endif
    SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
    SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
    printf (" --- declarationStatement = %p \n",declarationStatement);
    printf (" --- definingDeclaration = %p \n",definingDeclaration);
    if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
      printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
    printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
    if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
      printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
    // Route the walk through the defining declaration (see the function-level doc
    // comment about how parents of SgDeclarationStatement nodes are treated).
    if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
    {
#if 0
      printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
      // DQ (10/19/2012): Use the defining declaration instead.
      // parent = firstNondefiningDeclaration;
      parent = definingDeclaration;
    }
  }
#if 0
  printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
  // DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
  // debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
  // this will have to be revisited later since it appears clear that it is a problem for the binary analysis
  // work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
  // cycle, we don't exit when a cycle is identified (which is the point of the code below).
  // Note also that I have fixed the code (above and below) to only chase pointers through defining
  // declarations (where they exist), this is important since non-defining declarations can be almost
  // anywhere (and thus chasing them can make it appear that there are cycles where there are none
  // (I think); test2012_234.C demonstrates an example of this.
  // DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
  // if (foundCycle == true)
  if (foundCycle == false)
  {
    // Second pass (the real search): walk up again, redirecting through defining
    // declarations, until a node of the target type is found or the root is passed.
    while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
    {
      ROSE_ASSERT(parent->get_parent() != parent);
#if 0
      printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
      if (parent->get_file_info() != NULL)
        parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
      SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
      if (declarationStatement != NULL)
      {
#if 0
        printf ("Found a SgDeclarationStatement \n");
#endif
        SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
        SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
        printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
        printf (" --- definingDeclaration = %p \n",definingDeclaration);
        if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
          printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
        printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
        if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
          printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
        if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
        {
#if 0
          printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
          // DQ (10/19/2012): Use the defining declaration instead.
          // parent = firstNondefiningDeclaration;
          parent = definingDeclaration;
        }
      }
      parent = parent->get_parent();
#if 1
      // DQ (3/5/2012): Check for loops that will cause infinite loops.
      ROSE_ASSERT(parent != previouslySeenParent);
#else
      printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
      if (parent == previouslySeenParent)
        break;
#endif
    }
  }
  // Either a node of the target type (dynamic_cast succeeds) or NULL when the
  // walk ran off the top of the AST (or a cycle suppressed the second pass).
  return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
  // DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
  // Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
  // Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
  SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
  std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
  while (node) {
    if (NodeType *found = dynamic_cast<NodeType*>(node))
      return found;
    // FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
    ROSE_ASSERT(seen.insert(node).second);
    // Traverse to parent (declaration statements are a special case)
    if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
      SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
      SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
      if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
        // DQ (10/19/2012): Use the defining declaration instead.
        // node = firstNondefiningDeclaration;
        node = definingDeclaration;
      }
    } else {
      node = node->get_parent();
    }
  }
  return NULL;
#endif
}
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);
//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);
//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);
//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);
//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);
ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);
//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);
//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);
//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);
//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);
//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);
//! Get the closest class definition enclosing the specified AST node,
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);
// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaratin( const string& varname)
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);
// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);
SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);
// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
\brief
*/
// Liao, 1/9/2008
/*!
\brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);
/*!
\brief get the last statement within a scope, return NULL if it does not exist
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);
//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statement by default. Count transformation-generated ones, but excluding those which are not to be outputted in unparsers.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);
//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);
//! Get previous statement within the same scope of current statement
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt);
#if 0 //TODO
// preorder traversal from current SgNode till find next SgNode of type V_SgXXX
SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
\brief Compare AST nodes, subtree, etc
*/
//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);
//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same.
/*!
* There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
*/
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);
//! Check if a statement is the last statement within its closed scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
\brief Add, remove,and replace AST
scope->append_statement(), exprListExp->append_expression() etc. are not enough to handle side effect of parent pointers, symbol tables, preprocessing info, defining/nondefining pointers etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);
//! Special purpose function for deleting AST expression tress containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);
// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );
//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);
//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );
//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
//! Check if a scope statement has a simple children statement list
//! so insert additional statements under the scope is straightforward and unambiguous .
//! for example, SgBasicBlock has a simple statement list while IfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);
//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before or after the target statement within the
//target's scope
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);
//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);
//! Insert a statement after a target statement, Move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);
//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);
//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);
//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
// then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope,
bool movePreprocessingInfo=true);
//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts,
SgScopeStatement *scope);
//! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);
//! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST()
ROSE_DLL_API void deepDelete(SgNode* root);
//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false);
//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);
//! Replace all variable references to an old symbol in a scope to being references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );
/** Given an expression, generates a temporary variable whose initializer optionally evaluates
* that expression. Then, the var reference expression returned can be used instead of the original
* expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
* this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
* reference types correctly by using pointer types for the temporary.
* @param expression Expression which will be replaced by a variable
* @param scope scope in which the temporary variable will be generated
* @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
* @return declaration of the temporary variable, and a a variable reference expression to use instead of
* the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression,
SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);
/* This function creates a temporary variable for a given expression in the given scope
This is different from SageInterface::createTempVariableForExpression in that it does not
try to be smart to create pointers to reference types and so on. The tempt is initialized to expression.
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
@param expression Expression which will be replaced by a variable
@param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression
(SgExpression* expression, SgScopeStatement* scope);
//! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible
/*! We recommend to build SgFunctionParameterList before building a function declaration
However, it is still allowed to append new arguments for existing function declarations.
\todo function type , function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);
//!Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);
//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);
//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);
//! Set parameter list for a function declaration, considering existing parameter list etc.
// void setParameterList(SgFunctionDeclaration *func,SgFunctionParameterList *paralist);
template <class actualFunction>
ROSE_DLL_API void setParameterList(actualFunction *func,SgFunctionParameterList *paralist);
# if 1
// DQ (11/25/2011): Moved to the header file so that it could be seen as a template function.
// TODO consider the difference between C++ and Fortran
// fixup the scope of arguments,no symbols for nondefining function declaration's arguments
template <class actualFunction>
void
// SageInterface::setParameterList(SgFunctionDeclaration * func,SgFunctionParameterList * paralist)
setParameterList(actualFunction* func, SgFunctionParameterList* paralist)
{
// DQ (11/25/2011): Modified this to be a templated function so that we can handle both
// SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member
// function derived classes).
ROSE_ASSERT(func != NULL);
ROSE_ASSERT(paralist != NULL);
#if 0
// At this point we don't have cerr and endl defined, so comment this code out.
// Warn to users if a paralist is being shared
if (paralist->get_parent() !=NULL)
{
cerr << "Waring! Setting a used SgFunctionParameterList to function: "
<< (func->get_name()).getString()<<endl
<< " Sharing parameter lists can corrupt symbol tables!"<<endl
<< " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl;
// ROSE_ASSERT(false);
}
#endif
// Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate SgFunctionParameterList, so be cautious when set new paralist!!
if (func->get_parameterList() != NULL)
{
if (func->get_parameterList() != paralist)
{
delete func->get_parameterList();
}
}
func->set_parameterList(paralist);
paralist->set_parent(func);
// DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node.
// This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C,
// test2012_81.C and testcode2012_82.C demonstrate this problem.
SgInitializedNamePtrList & args = paralist->get_args();
for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++)
{
(*i)->set_declptr(func);
}
}
#endif
//! Set a pragma of a pragma declaration. handle memory release for preexisting pragma, and set parent pointer.
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma);
//! Replace an expression with another, used for variable reference substitution and others. the old expression can be deleted (default case) or kept.
ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false);
//! Replace a given expression with a list of statements produced by a generator
ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Similar to replaceExpressionWithStatement, but with more restrictions.
//! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in
ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Set operands for expressions with single operand, such as unary expressions. handle file info, lvalue, pointer downcasting, parent pointer etc.
ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand);
//!set left hand operand for binary expressions, transparently downcasting target expressions when necessary
ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs);
//!set left hand operand for binary expression
ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs);
//! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly.
ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top);
// DQ (1/25/2010): Added support for directories
//! Move file to be generated in a subdirectory (will be generated by the unparser).
ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file );
//! Supporting function to comment relocation in insertStatement() and removeStatement().
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement);
//! Relocate comments and CPP directives from one statement to another.
ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST repair, fix, and postprocessing.
\brief Mostly used internally when some AST pieces are built without knowing their target
scope/parent, especially during bottom-up construction of AST. The associated symbols,
parent and scope pointers cannot be set on construction then.
A set of utility functions are provided to
patch up scope, parent, symbol for them when the target scope/parent become know.
*/
//! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed.
/*! In AST translation, it is possible to build a variable reference before the variable
is being declared. buildVarRefExp() will use fake initialized name and symbol as placeholders
to get the work done. Users should call fixVariableReference() when AST is complete and all
variable declarations are in place.
*/
ROSE_DLL_API int fixVariableReferences(SgNode* root);
//!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known.
/*!
It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general.
In this case, we have to patch up symbol table, scope and parent information when the scope is known. This function is usually used internally within appendStatement(), insertStatement().
*/
ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a struct declaration was built without knowing its target scope.
ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a class declaration was built without knowing its target scope.
ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a namespace declaration was built without knowing its target scope.
ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope);
//! Fix symbol table for SgLabelStatement. Used Internally when the label is built without knowing its target scope. Both parameters cannot be NULL.
ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope);
//! Set a numerical label for a Fortran statement. The statement should have a enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed.
ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value);
//! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope
ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope);
//! A wrapper containing fixes (fixVariableDeclaration(),fixStructDeclaration(), fixLabelStatement(), etc) for all kinds statements. Should be used before attaching the statement into AST.
ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope);
//@}
//! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope.
/*! This function not only set the defining and nondefining links of the newly introduced
* function declaration inside a scope, but also update other same function declarations' links
* accordingly if there are any.
* Assumption: The function has already inserted/appended/prepended into the scope before calling this function.
*/
ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope);
//------------------------------------------------------------------------
//@{
/*! @name Advanced AST transformations, analyses, and optimizations
\brief Some complex but commonly used AST transformations.
*/
//! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++
ROSE_DLL_API bool
collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false);
//!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either of a function, a scope, or a single line statement.
ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars);
//!Collect read only variables within a statement. The statement can be either of a function, a scope, or a single line statement.
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars);
//!Collect read only variable symbols within a statement. The statement can be either of a function, a scope, or a single line statement.
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols);
//! Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref);
//! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//!Call liveness analysis on an entire project
ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false);
//!get liveIn and liveOut variables for a for loop from liveness analysis result liv.
ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts);
#endif
//!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types.
ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, VariantT> > & results);
//! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations!
/*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */
ROSE_DLL_API void constantFolding(SgNode* r);
//!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements and return expressions with side effects. Return the number of statements inserted.
/*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return with complex expressions with side effects are rewritten using an additional assignment statement.
*/
ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s);
//! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments.
ROSE_DLL_API void removeJumpsToNextStatement(SgNode*);
//! Remove labels which are not targets of any goto statements
ROSE_DLL_API void removeUnusedLabels(SgNode* top);
//! Remove consecutive labels
ROSE_DLL_API void removeConsecutiveLabels(SgNode* top);
//! Replace an expression with a temporary variable and an assignment statement
/*!
Add a new temporary variable to contain the value of 'from'
Change reference to 'from' to use this new variable
Assumptions: 'from' is not within the test of a loop or 'if'
not currently traversing 'from' or the statement it is in
*/
ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = "");
//! Split long expressions into blocks of statements
ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr);
//! Remove labeled goto statements
ROSE_DLL_API void removeLabeledGotos(SgNode* top);
//! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label.
ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch);
//! Check if the body of a 'for' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs);
//! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs);
//! Check if the body of a 'while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws);
//! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws);
//! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws);
//! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs);
//! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs);
//! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs);
//! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true);
//! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos);
//! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt);
//! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while,
//! switch, If, Catch, OmpBodyStmt, etc
bool isBodyStatement (SgStatement* s);
//! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them.
void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true);
//! The same as changeAllBodiesToBlocks(SgNode* top). To be phased out.
void changeAllLoopBodiesToBlocks(SgNode* top);
//! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc.
SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt);
#if 0
/** If s is the body of a loop, catch, or if statement and is already a basic block,
* s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent
* (a loop, catch, or if statement, etc). */
SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s);
#endif
//! Get the constant value from a constant integer expression; abort on
//! everything else. Note that signed long longs are converted to unsigned.
unsigned long long getIntegerConstantValue(SgValueExp* expr);
//! Get a statement's dependent declarations, which declare the types used in the statement. The returned vector of declaration statements is sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will be treated as a reference to the enclosing namespace.
std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt );
//! Insert an expression (new_exp )before another expression (anchor_exp) has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned.
SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp);
//! Insert an expression (new_exp ) after another expression (anchor_exp) has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp),T1) )... , where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);
/// \brief moves the body of a function f to a new function f`;
/// f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
/// and an initialized name refering to the temporary variable
/// holding the result of f`. In case f returns void
/// the initialized name is NULL.
/// \param definingDeclaration the defining function declaration of f
/// \param newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
/// for functions returning void and a value, respectively.
/// two function declarations are inserted in f's enclosing scope
/// \code
/// result_type f`(...); <--- (1)
/// result_type f (...) { forward call to f` }
/// result_type f`(...) { original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);
/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
/// interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
  // Derive the wrapper function's name from the wrapped function's current
  // name, then delegate to the SgName-based overload of wrapFunction.
  const SgName wrapperName = nameGen(definingDeclaration.get_name());
  return wrapFunction(definingDeclaration, wrapperName);
}
/// \brief convenience function that returns the first initialized name in a
/// list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);
//@}
// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );
//! Move a declaration to a scope which is the closest to the declaration's use places
bool moveDeclarationToInnermostScope(SgDeclarationStatement* decl, bool debug/*= false */);
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h
// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);
// or a friendly version of unparseToString(), as a memeber function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree
//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);
//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//! Are corresponding expressions in two lists equal (using a deep comparison)?
bool expressionTreeEqualStar(const SgExpressionPtrList&,
const SgExpressionPtrList&);
//----------------------AST verfication/repair----------------------------
//------------------------------------------------------------------------
// sanity check of AST subtree, any suggestions?
// TODO
verifySgNode(SgNode* node, bool subTree=true);
//src/midend/astDiagnostics/AstConsistencyTests.h
// AstTests::runAllTests(SgProject * )
//src/midend/astUtil/astInterface/AstInterface.h.C
//FixSgProject(SgProject &project)
//FixSgTree(SgNode* r)
//src/frontend/SageIII/astPostProcessing
//AstPostProcessing(SgNode * node)
//--------------------------AST modification------------------------------
//------------------------------------------------------------------------
// any operations changing AST tree, including
// insert, copy, delete(remove), replace
// insert before or after some point, argument list is consistent with LowLevelRewrite
void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true);
// previous examples
//void myStatementInsert(SgStatement* target,...)
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock)
// copy
// copy children of one basic block to another basic block
//void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b);
void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst);
// delete (remove) a node or a whole subtree
void removeSgNode(SgNode* targetNode); // need this?
void removeSgNodeTree(SgNode* subtree); // need this?
void removeStatement( SgStatement* targetStmt);
//Move = delete + insert
void moveAst (SgNode* src, SgNode* target); // need this?
// similar to
void moveStatements (SgBasicBlock* src, SgBasicBlock* target);
// replace= delete old + insert new (via building or copying)
// DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE.
// void replaceAst(SgNode* oldNode, SgNode* newNode);
//void replaceChild(SgNode* parent, SgNode* from, SgNode* to);
//bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n)
//--------------------------AST transformations---------------------------
//------------------------------------------------------------------------
// Advanced AST modifications through basic AST modifications
// Might not be included in AST utitlity list, but listed here for the record.
// extract statements/content from a scope
void flattenBlocks(SgNode* n);
//src/midend/astInlining/inlinerSupport.h
void renameVariables(SgNode* n);
void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition);
void simpleCopyAndConstantPropagation(SgNode* top);
void changeAllMembersToPublic(SgNode* n);
void removeVariableDeclaration(SgInitializedName* initname);
//! Convert something like "int a = foo();" into "int a; a = foo();"
SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init);
//! Rewrites a while or for loop so that the official test is changed to
//! "true" and what had previously been the test is now an if-break
//! combination (with an inverted condition) at the beginning of the loop
//! body
void pushTestIntoBody(LoopStatement* loopStmt);
//programTransformation/finiteDifferencing/finiteDifferencing.h
//! Move variables declared in a for statement to just outside that statement.
void moveForDeclaredVariables(SgNode* root);
//------------------------ Is/Has functions ------------------------------
//------------------------------------------------------------------------
// misc. boolean functions
// some of them could moved to SgXXX class as a member function
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
bool isSwitchCond (const SgStatement* s);
bool isIfCond (const SgStatement* s);
bool isWhileCond (const SgStatement* s);
bool isStdNamespace (const SgScopeStatement* scope);
bool isTemplateInst (const SgDeclarationStatement* decl);
bool isCtor (const SgFunctionDeclaration* func);
bool isDtor (const SgFunctionDeclaration* func);
// src/midend/astInlining/typeTraits.h
bool hasTrivialDestructor(SgType* t);
ROSE_DLL_API bool isNonconstReference(SgType* t);
ROSE_DLL_API bool isReferenceType(SgType* t);
// generic ones, or move to the SgXXX class as a member function
bool isConst(SgNode* node); // const type, variable, function, etc.
// .... and more
bool isConstType (const SgType* type);
bool isConstFunction (const SgFunctionDeclaration* decl);
bool isMemberVariable(const SgInitializedName & var);
//bool isMemberVariable(const SgNode& in);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
bool MayRedefined(SgExpression* expr, SgNode* root);
// bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h
bool hasAddressTaken(SgExpression* expr, SgNode* root);
//src/midend/astInlining/inlinerSupport.C
// can also classified as topdown search
bool containsVariableReference(SgNode* root, SgInitializedName* var);
bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var);
bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc,
SgInitializedName* toCheck,
SgInitializedName* lifetime)
//src/midend/programTransformation/partialRedundancyElimination/pre.h
bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n);
//------------------------ loop handling ---------------------------------
//------------------------------------------------------------------------
//get and set loop control expressions
// 0: init expr, 1: condition expr, 2: stride expr
SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt );
int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp);
bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref);
SgInitializedName * getLoopIndexVar(SgForStatement* forstmt);
//------------------------expressions-------------------------------------
//------------------------------------------------------------------------
//src/midend/programTransformation/partialRedundancyElimination/pre.h
int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root);
//src/midend/astInlining/replaceExpressionWithStatement.h
void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to);
void replaceSubexpressionWithStatement(SgExpression* from,
StatementGenerator* to);
SgExpression* getRootOfExpression(SgExpression* n);
//--------------------------preprocessing info. -------------------------
//------------------------------------------------------------------------
//! Removes all preprocessing information at a given position.
void cutPreprocInfo (SgBasicBlock* b,
PreprocessingInfo::RelativePositionType pos,
AttachedPreprocessingInfoType& save_buf);
//! Pastes preprocessing information at the front of a statement.
void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
//! Pastes preprocessing information at the back of a statement.
void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
/*!
* \brief Moves 'before' preprocessing information.
* Moves all preprocessing information attached 'before' the source
* statement to the front of the destination statement.
*/
// a generic one for all
/// void movePreprocessingInfo(src, dest, RelativePositionType);
void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest);
void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest);
void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest);
//--------------------------------operator--------------------------------
//------------------------------------------------------------------------
from transformationSupport.h, not sure if they should be included here
/* return enum code for SAGE operators */
operatorCodeType classifyOverloadedOperator(); // transformationSupport.h
/*! \brief generates a source code string from operator name.
This function returns a string representing the elementwise operator (for primative types)
that would be match that associated with the overloaded operator for a user-defined
abstractions (e.g. identifyOperator("operator+()") returns "+").
*/
std::string stringifyOperator (std::string name);
//--------------------------------macro ----------------------------------
//------------------------------------------------------------------------
std::string buildMacro ( std::string s ); //transformationSupport.h
//--------------------------------access functions---------------------------
//----------------------------------get/set sth.-----------------------------
// several categories:
* get/set a direct child/grandchild node or fields
* get/set a property flag value
* get a descendent child node using preorder searching
* get an ancestor node using bottomup/reverse searching
// SgName or string?
std::string getFunctionName (SgFunctionCallExp* functionCallExp);
std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression );
// do we need them anymore? or existing member functions are enought?
// a generic one:
std::string get_name (const SgNode* node);
std::string get_name (const SgDeclarationStatement * declaration);
// get/set some property: should moved to SgXXX as an inherent memeber function?
// access modifier
void setExtern (SgFunctionDeclartion*)
void clearExtern()
// similarly for other declarations and other properties
void setExtern (SgVariableDeclaration*)
void setPublic()
void setPrivate()
#endif
// DQ (1/23/2013): Added support for generated a set of source sequence entries.
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );
//--------------------------------Type Traits (C++)---------------------------
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);
// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );
// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );
// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The datastructure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One needs to always check whether hasValue_ is true before accessing value_ */
// Result of SageInterface::evaluateConstIntegerExpression(). Callers must
// check hasValue_ before reading value_ (value_ is unspecified otherwise).
struct const_int_expr_t {
size_t value_;   // the evaluated constant; only meaningful when hasValue_ is true
bool hasValue_;  // true iff the expression could be evaluated to a constant integer
};
// Result of SageInterface::evaluateConstNumericExpression(). Callers must
// check hasValue_ before reading value_ (value_ is unspecified otherwise).
struct const_numeric_expr_t {
bool hasValue_;  // true iff the expression could be evaluated to a constant
bool isIntOnly_; // true iff the evaluation involved only integer arithmetic
double value_;   // the evaluated constant; only meaningful when hasValue_ is true
};
/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
struct const_numeric_expr_t evaluateConstNumericExpression(SgExpression *expr);
// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);
//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);
#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
}// end of namespace
#endif
|
omp_task_red_taskloop.c | // RUN: %libomp-compile-and-run
// Parsing error until gcc8:
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// Parsing error until clang11:
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// Missing GOMP_taskgroup_reduction_(un)register in LLVM/OpenMP
// Should be removed once the functions are implemented
// XFAIL: gcc-9, gcc-10
#include <stdio.h>
#include <omp.h>
int r;
// Produces a per-task contribution that depends on both indices: k + l + 1.
int work(int k, int l)
{
  int base = k + l;
  return base + 1;
}
// Spawns two explicit tasks that each add work(i, l) into the global
// reduction variable r. Both tasks participate (via in_reduction) in the
// enclosing taskgroup's task_reduction over r; i and th_gen are captured
// firstprivate so each task sees their values at task-creation time.
void bar(int i) {
#pragma omp taskgroup task_reduction(+:r)
  { int th_gen = omp_get_thread_num(); // thread that generates the two tasks
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
    {
      r += work(i, 0); // contributes i + 0 + 1
      printf("executing task (%d, 0), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
    }
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
    {
      r += work(i, 1); // contributes i + 1 + 1
      printf("executing task (%d, 1), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
    }
  }
}
// Exercises taskgroup and taskloop reductions over the global r:
// first bar(0) inside a taskgroup task_reduction, then a taskloop
// reduction for i = 1..3 where each iteration calls bar(i), spawns a
// nested in_reduction task adding i, and logs progress. Returns 0.
int foo() {
  int i;
  int th_gen = omp_get_thread_num();
#pragma omp taskgroup task_reduction(+:r)
  {
    bar(0);
  }
  printf("th %d passed bar0\n", th_gen);
#pragma omp taskloop reduction(+:r) firstprivate(th_gen)
  for (i = 1; i < 4; ++i) {
    bar(i);
    printf("th %d (gen by th %d) passed bar%d in taskloop\n", omp_get_thread_num(), th_gen, i);
#pragma omp task in_reduction(+:r)
    r += i; // each taskloop iteration also contributes i directly
  }
  return 0;
}
// res = 2*((1+2)+(2+3)+(3+4)+(4+5)+1+2+3) = 60
#define res 60
int main()
{
  r = 0; // global reduction accumulator, combined across both threads
  // Two threads both execute foo(); all task contributions are merged
  // into r through the taskgroup/taskloop reductions above.
#pragma omp parallel num_threads(2)
  foo();
  if (r == res) {
    return 0; // success: r matches the precomputed expected sum (see comment above)
  } else {
    printf("error r = %d (!= %d)\n", r, res);
    return 1;
  }
}
|
vector_list_math.c | #include "vector_list_math.h"
#include "../../vector/common/common_vector_math.h"
#include "../../vector/sparse/sparse_vector_math.h"
#include "../../global_defs.h"
#include <stdlib.h>
/*
 * Recomputes the (squared) vector length for each cluster, but only for
 * clusters whose contents changed since the last pass
 * (cluster_not_changed[i] == 0). Unchanged entries keep their old value.
 */
void update_vector_list_lengths(struct sparse_vector* vector_array
        , uint64_t no_clusters
        , uint32_t* cluster_not_changed
        , VALUE_TYPE* vector_lengths) {
    uint64_t cluster_id;
    #pragma omp parallel for schedule(dynamic, 1000)
    for (cluster_id = 0; cluster_id < no_clusters; cluster_id++) {
        if (cluster_not_changed[cluster_id]) continue; /* nothing to redo */
        vector_lengths[cluster_id] = calculate_squared_vector_length(
                vector_array[cluster_id].values, vector_array[cluster_id].nnz);
    }
}
/*
 * Allocates (zero-initialized) and fills *vector_lengths with the squared
 * length of every vector in vector_array, computed in parallel.
 */
void calculate_vector_list_lengths(struct sparse_vector* vector_array
        , uint64_t no_clusters
        , VALUE_TYPE** vector_lengths) {
    VALUE_TYPE* lengths = (VALUE_TYPE*) calloc(no_clusters, sizeof(VALUE_TYPE));
    uint64_t idx;
    *vector_lengths = lengths;
    #pragma omp parallel for schedule(dynamic, 1000)
    for (idx = 0; idx < no_clusters; idx++) {
        lengths[idx] = calculate_squared_vector_length(vector_array[idx].values,
                                                       vector_array[idx].nnz);
    }
}
/*
 * Builds one condensed block-level vector per input vector: the key space of
 * size dim is split into no_blocks blocks of block_width keys each (last one
 * possibly shorter), and the per-block layout is produced by
 * get_blockvector_nnz()/fill_blockvector(). The result array is allocated
 * into *block_vectors; entries with no nonzero blocks keep NULL keys/values
 * and nnz == 0 (from calloc).
 */
void create_block_vectors_list_from_vector_list(struct sparse_vector *mtrx
        , uint64_t no_blocks
        , uint64_t no_vectors
        , uint64_t dim
        , struct sparse_vector **block_vectors) {
    uint64_t v, block_width;
    /* block_width = ceil(dim / no_blocks) */
    block_width = dim / no_blocks;
    if (dim % no_blocks > 0) block_width++;
    *block_vectors = (struct sparse_vector *) calloc(no_vectors, sizeof(struct sparse_vector));
    #pragma omp parallel for schedule(dynamic, 1000)
    for (v = 0; v < no_vectors; v++) {
        struct sparse_vector *bv = &(*block_vectors)[v];
        bv->nnz = get_blockvector_nnz(mtrx[v].keys, mtrx[v].values,
                                      mtrx[v].nnz, block_width);
        if (bv->nnz == 0) continue;
        bv->keys = (KEY_TYPE*) calloc(bv->nnz, sizeof(KEY_TYPE));
        bv->values = (VALUE_TYPE*) calloc(bv->nnz, sizeof(VALUE_TYPE));
        /* fill_blockvector may shrink bv->nnz to the actual count written */
        fill_blockvector(mtrx[v].keys, mtrx[v].values, mtrx[v].nnz,
                         block_width, bv->keys, bv->values, &(bv->nnz));
    }
}
/*
 * Rebuilds the block vector only for inputs that changed
 * (vector_not_changed[v] == 0). Old key/value buffers are released via
 * free_null() before the entry is recomputed exactly as in
 * create_block_vectors_list_from_vector_list().
 */
void update_changed_blockvectors(struct sparse_vector *mtrx
        , uint64_t no_blocks
        , uint64_t no_vectors
        , uint64_t dim
        , uint32_t* vector_not_changed
        , struct sparse_vector *block_vectors) {
    uint64_t v, block_width;
    /* block_width = ceil(dim / no_blocks) */
    block_width = dim / no_blocks;
    if (dim % no_blocks > 0) block_width++;
    #pragma omp parallel for schedule(dynamic, 1000)
    for (v = 0; v < no_vectors; v++) {
        struct sparse_vector *bv;
        if (vector_not_changed[v]) continue; /* unchanged: keep old block vector */
        bv = &block_vectors[v];
        free_null(bv->keys);
        free_null(bv->values);
        bv->nnz = get_blockvector_nnz(mtrx[v].keys, mtrx[v].values,
                                      mtrx[v].nnz, block_width);
        if (bv->nnz == 0) continue; /* buffers stay as free_null left them */
        bv->keys = (KEY_TYPE*) calloc(bv->nnz, sizeof(KEY_TYPE));
        bv->values = (VALUE_TYPE*) calloc(bv->nnz, sizeof(VALUE_TYPE));
        fill_blockvector(mtrx[v].keys, mtrx[v].values, mtrx[v].nnz,
                         block_width, bv->keys, bv->values, &(bv->nnz));
    }
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
/*
** Binary-weight GEMM: for each A[i][k] (stored as char), row k of B is
** added to row i of C when the weight is nonzero and subtracted otherwise.
** ALPHA is accepted for signature symmetry with the other gemm_* kernels
** but is not used. All matrices are row-major with the given leading dims.
*/
void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, inner;
    for(row = 0; row < M; ++row){
        for(inner = 0; inner < K; ++inner){
            char weight = A[row*lda + inner];
            if(weight){
                for(col = 0; col < N; ++col)
                    C[row*ldc + col] += B[inner*ldb + col];
            } else {
                for(col = 0; col < N; ++col)
                    C[row*ldc + col] -= B[inner*ldb + col];
            }
        }
    }
}
/*
** Allocates a rows*cols matrix and fills it with uniform random floats in
** [0, 1] using rand(). The caller owns the returned buffer (free()).
** Fix: the calloc result was previously dereferenced without a check, so an
** allocation failure crashed inside the fill loop; now it aborts with a
** diagnostic instead. The explicit cast also keeps the code C++-compatible.
*/
float *random_matrix(int rows, int cols)
{
    int i;
    float *m = (float*)calloc(rows*cols, sizeof(float));
    if(!m){
        fprintf(stderr, "random_matrix: failed to allocate %d x %d matrix\n", rows, cols);
        exit(EXIT_FAILURE);
    }
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/(float)RAND_MAX;
    }
    return m;
}
/*
** Benchmarks 10 calls to gemm_cpu on freshly generated random matrices of
** the requested shapes/transpose flags and prints the elapsed wall time.
** Fix: the elapsed time was computed in seconds ((end-start)/CLOCKS_PER_SEC)
** but printed with an "ms" label; it is now scaled to milliseconds, and the
** division is done in double to match the %lf conversion exactly.
*/
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",
           m,k,k,n, TA, TB, (double)(end-start)*1000.0/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/*
** Public GEMM entry point: C = ALPHA*op(A)*op(B) + BETA*C (row-major).
** TA/TB select transposition of A/B; lda/ldb/ldc are leading dimensions.
** Thin wrapper that forwards unchanged to the CPU implementation.
*/
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
/*
** C += ALPHA * A * B, neither operand transposed.
** A: M x K (lda), B: K x N (ldb), C: M x N (ldc), all row-major.
** Accumulates into C; BETA scaling is applied by the caller (gemm_cpu).
*/
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            /* hoist the scaled A entry; inner loop streams rows of B and C */
            const float a_ik = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += a_ik*B[k*ldb + j];
            }
        }
    }
}
/*
** C += ALPHA * A * B^T.
** A: M x K (lda), B: N x K (ldb) accessed by rows, C: M x N (ldc).
** Each output element is the dot product of row i of A and row j of B.
*/
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {
        for (j = 0; j < N; ++j) {
            float dot = 0;
            for (k = 0; k < K; ++k) {
                dot += ALPHA*A[i*lda + k]*B[j*ldb + k];
            }
            C[i*ldc + j] += dot;
        }
    }
}
/*
** C += ALPHA * A^T * B.
** A: K x M (lda) read column-wise, B: K x N (ldb), C: M x N (ldc).
*/
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            /* A is transposed: element (k, i) scales row k of B */
            const float a_ki = ALPHA*A[k*lda + i];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += a_ki*B[k*ldb + j];
            }
        }
    }
}
/*
** C += ALPHA * A^T * B^T.
** A: K x M (lda), B: N x K (ldb), both read transposed; C: M x N (ldc).
*/
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {
        for (j = 0; j < N; ++j) {
            float dot = 0;
            for (k = 0; k < K; ++k) {
                dot += ALPHA*A[i + k*lda]*B[k + j*ldb];
            }
            C[i*ldc + j] += dot;
        }
    }
}
/*
** Dispatching CPU GEMM: C = ALPHA*op(A)*op(B) + BETA*C (row-major).
** First scales C by BETA, then hands off to the kernel matching the
** transpose flags TA (for A) and TB (for B).
*/
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    int i, j;
    /* apply BETA scaling up front; the kernels only accumulate */
    for (i = 0; i < M; ++i) {
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] *= BETA;
        }
    }
    /* select kernel by transpose flags */
    if (TA) {
        if (TB) gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else    gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    } else {
        if (TB) gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else    gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
}
#ifdef GPU
#include <math.h>
/*
** GPU GEMM on device pointers via cuBLAS. Operands are passed swapped
** (B first, dimensions N, M) so that the row-major matrices come out
** correctly from cuBLAS's column-major convention.
** NOTE(review): cublasSgemm returns cublasStatus_t, not cudaError_t;
** the assignment compiles in C only via lax enum conversion, and
** check_error will decode any nonzero cuBLAS status as a CUDA error
** code — confirm and switch to a cuBLAS-aware status check.
*/
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
check_error(status);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/*
** Times 32 gemm_gpu launches on freshly generated random matrices and
** prints the elapsed time in seconds.
** NOTE(review): a, b, c are host buffers from random_matrix passed
** straight to gemm_gpu (no cuda_make_array upload as in time_gpu below)
** — confirm this is intended.
** NOTE(review): clock() measures CPU time and there is no device
** synchronization after the loop, so in-flight kernels may not be
** reflected in the reported time.
*/
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
/*
** Benchmarks `iter` synchronized gemm_gpu calls on device copies of
** random matrices and reports elapsed seconds plus an estimated GFLOPS
** rate (flop model: m*n*(2k + 2) per multiply).
** NOTE(review): cudaThreadSynchronize() is the legacy name; modern CUDA
** deprecates it in favor of cudaDeviceSynchronize() — verify against the
** toolkit version this builds with.
*/
void time_gpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
/* upload operands once; the timed loop reuses the device buffers */
float *a_cl = cuda_make_array(a, m*k);
float *b_cl = cuda_make_array(b, k*n);
float *c_cl = cuda_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
cudaThreadSynchronize();
}
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
cuda_free(a_cl);
cuda_free(b_cl);
cuda_free(c_cl);
free(a);
free(b);
free(c);
}
/*
** Runs the same GEMM on GPU and CPU with identical random inputs
** (srand(0) for reproducibility) and prints the mean squared difference
** over the m*n outputs (labeled SSE, but divided by m*n).
** NOTE(review): host buffers a and b are passed directly to gemm_gpu —
** as in time_gpu_random_matrix, confirm this is the intended calling
** convention for this build.
*/
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *c_gpu = random_matrix(m,n);
/* both outputs start from zero so BETA=1 accumulation is comparable */
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
free(a);
free(b);
free(c);
free(c_gpu);
}
/*
** Benchmark driver: runs time_gpu over a fixed set of problem sizes.
** Returns 0 unconditionally. The commented-out calls are kept as a
** record of previously exercised configurations.
*/
int test_gpu_blas()
{
    /*
    test_gpu_accuracy(0,0,10,576,75);
    test_gpu_accuracy(0,0,17,10,10);
    test_gpu_accuracy(1,0,17,10,10);
    test_gpu_accuracy(0,1,17,10,10);
    test_gpu_accuracy(1,1,17,10,10);
    test_gpu_accuracy(0,0,1000,10,100);
    test_gpu_accuracy(1,0,1000,10,100);
    test_gpu_accuracy(0,1,1000,10,100);
    test_gpu_accuracy(1,1,1000,10,100);
    test_gpu_accuracy(0,0,10,10,10);
    time_gpu(0,0,64,2916,363);
    time_gpu(0,0,64,2916,363);
    time_gpu(0,0,64,2916,363);
    time_gpu(0,0,192,729,1600);
    time_gpu(0,0,384,196,1728);
    time_gpu(0,0,256,196,3456);
    time_gpu(0,0,256,196,2304);
    time_gpu(0,0,128,4096,12544);
    time_gpu(0,0,128,4096,4096);
    */
    /* {TA, TB, m, k, n} for each benchmarked problem */
    static const int cases[][5] = {
        {0,0,64,75,12544},
        {0,0,64,75,12544},
        {0,0,64,75,12544},
        {0,0,64,576,12544},
        {0,0,256,2304,784},
        {1,1,2304,256,784},
        {0,0,512,4608,196},
        {1,1,4608,512,196},
    };
    int i;
    for (i = 0; i < (int)(sizeof(cases)/sizeof(cases[0])); ++i) {
        time_gpu(cases[i][0], cases[i][1], cases[i][2], cases[i][3], cases[i][4]);
    }
    return 0;
}
#endif
|
t_initialize_subtree.c | /* ========================================================================== */
/* === GPU/t_initialize_subtree.c =========================================== */
/* ========================================================================== */
/* -----------------------------------------------------------------------------
* CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis
* The CHOLMOD/GPU Module is licensed under Version 2.0 of the GNU
* General Public License. See gpl.txt for a text of the license.
* CHOLMOD is also available under other licenses; contact authors for details.
* http://www.suitesparse.com
* -------------------------------------------------------------------------- */
/*
* File:
* t_initialize_subtree
*
* Description:
* Contains functions for initializing
* subtrees of the elimination tree.
*
*/
/* includes */
#include "cholmod_template.h"
#include <string.h>
#include <time.h>
/*
* Function:
* query_gpu
*
* Description:
* Queries GPU properties (clock speed, # SMs, etc.)
*/
/*
** Fills in the GPU properties used for the peak-flops estimate:
** clock rate (kHz, as reported by cudaGetDeviceProperties), number of
** SMs, and a hard-coded instructions-per-cycle model (see comment on
** the assignment). When SUITESPARSE_CUDA is not defined the outputs are
** left untouched — callers must pre-initialize them.
*/
void TEMPLATE2(CHOLMOD(query_gpu)) (int *clockRate, int *sm, int *ipc, int gpuid)
{
#ifdef SUITESPARSE_CUDA
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, gpuid);
*clockRate = prop.clockRate;
*sm = prop.multiProcessorCount;
*ipc = 64*2; /* 64DP ALUs x 2DP (1DP FMA per cycle) */
PRINTF("GPU Info:\n");
PRINTFV("\tclock rate: %d\n",*clockRate);
PRINTFV("\t# SMs: %d\n",*sm);
PRINTFV("\tipc: %d\n",*ipc);
PRINTFV("\tname: %s\n",prop.name);
#endif
}
/*
* Function:
* query_cpu
*
* Description:
* Queries CPU properties (clock speed, # SMs, etc.)
*/
/*
** Fills in a rough CPU performance model used for load balancing.
** NOTE(review): the clock rate is hard-coded to 2.3 GHz and the core
** count is assumed to be numThreads/2 (i.e. two hyperthreads per core,
** per the "16 cores" comment) — these are estimates, not queried from
** the machine; confirm they match the target hardware.
*/
void TEMPLATE2(CHOLMOD(query_cpu)) (int *clockRate, int *sm, int *ipc, int numThreads)
{
*clockRate = 2300000; /* clock speed in kilohertz*/
*sm = numThreads/2; /* # cores (16 cores) */
*ipc = 16; /* 16DP instructions per cycle */
/* guard against numThreads <= 1 producing zero cores */
if(*sm == 0) *sm = 1;
PRINTF("CPU Info:\n");
PRINTFV("\tclock rate: %d\n",*clockRate);
PRINTFV("\t# cores: %d\n",*sm);
PRINTFV("\tipc: %d\n",*ipc);
}
/*
* Function:
* binarysearch_tree
*
* Description:
* Performs binary search to find ideal subtree sizes for
* the elimination tree. Splits elimination tree into
* subtrees that can be factorized concurrently.
*
*/
/*
** Parameters:
**   Common  - CHOLMOD control (dev_mempool_size, ompNumThreads)
**   A       - input matrix (only its sizes ncol/nzmax are used here)
**   L       - supernodal symbolic factor (n, nsuper, xsize)
**   gb_p    - global sizes/limits; runType and the per-buffer sizes are
**             updated in place by this search
**   cpu_p   - host pointers (Lpi)
**   tree_p  - elimination-tree arrays (children counts, subtree ptrs)
**   LpxSub  - per-supernode offsets within a subtree's factor; reset to
**             -1 on every search iteration
*/
void TEMPLATE2 (CHOLMOD (binarysearch_tree))
(
cholmod_common *Common,
cholmod_sparse *A,
cholmod_factor *L,
cholmod_global_pointers *gb_p,
cholmod_cpu_pointers *cpu_p,
cholmod_tree_pointers *tree_p,
Int *LpxSub
)
{
/* local variables */
Int s, n, nls, numSuper, subtree, subtreeSize, subtreeSizeDiff, subtreeSizePrev, max_factor_size;
Int *supernode_children_num, *supernode_children_num2, *supernode_children_count2,
*supernode_levels_subtree_ptrs, *supernode_subtree_ptrs, *level_num_desc_ptrs,
*level_descendants_ptrs, *Lpi;
size_t gpu_memtot, cpu_memtot, size_A;
int search, binarySearch;
Int counts[3];
/*
* Set variables & pointers
*/
/* set host variables */
n = L->n;
search = 0;
gpu_memtot = 0;
cpu_memtot = 0;
/* set host pointers */
Lpi = cpu_p->Lpi;
/* set tree pointers */
supernode_children_num = tree_p->supernode_children_num;
supernode_children_num2 = tree_p->supernode_children_num2;
supernode_children_count2 = tree_p->supernode_children_count2;
supernode_levels_subtree_ptrs = tree_p->supernode_levels_subtree_ptrs;
supernode_subtree_ptrs = tree_p->supernode_subtree_ptrs;
level_num_desc_ptrs = tree_p->level_num_desc_ptrs;
level_descendants_ptrs = tree_p->level_descendants_ptrs;
/*
* Determine whether to use root only:
* Calculate size of Ai, Ap, Ax. If size is larger
* than Common->dev_mempool_size (device memory) then
* use only root tree, not our GPU subtrees.
*/
nls = Lpi[L->nsuper] - Lpi[0];
size_A = (nls + A->ncol + A->nzmax + 2)*sizeof(Int) + A->nzmax*sizeof(double);
if(size_A >= Common->dev_mempool_size && gb_p->runType != 1) {
gb_p->runType = 3; /* use only root */
}
/* set maximum BRANCH_SIZE (cutoff) */
if(gb_p->runType == 1) subtreeSize = L->xsize + 1;
else subtreeSize = L->xsize - 1; /* initial subtree size (at least one supernode on root alg.) */
subtreeSizePrev = 0;
subtreeSizeDiff = 0;
search = 0;
subtree = 0;
binarySearch = (int)(BINARY_SEARCH);
/* case if factor (subtree size) is larger than GPU memory available */
if(subtreeSize > (Int)(Common->dev_mempool_size / 8)) {
subtreeSize = (Int)(Common->dev_mempool_size / 8);
}
/* Binary Search loop
* conditions:
* 1. BINARY_SEARCH steps reached
* 2. factor fits GPU memory
* 3. factor fits CPU (pinned) memory
*/
/* NOTE(review): both the GPU check and the "CPU (pinned)" check compare
* against the same Common->dev_mempool_size — confirm a separate host
* pool size was not intended for cpu_memtot. */
while(search <= binarySearch || gpu_memtot > Common->dev_mempool_size || cpu_memtot > Common->dev_mempool_size) {
/* case binary search could not find small enough subtree to fit in GPU, use root only.. */
if ( subtreeSize == 1 ) {
PRINTF("subtreeSize = 1, use root only..\n");
/* no subtree fits GPU, so use root only */
gb_p->runType = 3;
/* set maximum BRANCH_SIZE (cutoff) */
subtreeSize = L->xsize - 1; /* initial subtree size (at least one supernode on root alg.) */
subtreeSizePrev = 0;
subtreeSizeDiff = 0;
search = 0;
subtree = 0;
/* case if factor (subtree size) is larger than GPU memory available */
if(subtreeSize > (Int)(Common->dev_mempool_size / 8)) {
subtreeSize = (Int)(Common->dev_mempool_size / 8);
}
}
/* clear local variables */
gb_p->numSubtree = 0; /* # subtrees in tree */
max_factor_size = 0; /* max factor size in any subtree */
gb_p->maxCsize = 0; /* max C size in batch & streams in any level in any subtree */
gb_p->maxndesc = 0; /* max # descendants in batch & streams in any level in any subtree */
gb_p->maxbatch = 0; /* max batch size (# supernodes) in any level */
counts[0] = 0;
counts[1] = 0;
counts[2] = 0;
/*
* Store subtrees of tree
*
* Description:
* traverse through tree in two manners:
* 1. DESCEND if supernode has children that haven't been touched (added to subtree)
* 2. ASCEND if supernode has no children or if all have been touched (added to subtree)
*
* Lastly, store supernodes in last subtree of tree that does not fit GPU
*/
/* copy # children per supernode */
memcpy(supernode_children_num, supernode_children_num2, L->nsuper*sizeof(Int));
/* reset counters */
memset(supernode_children_count2, 0, L->nsuper*sizeof(Int));
TEMPLATE2 (CHOLMOD (build_subtree))( L,
gb_p,
tree_p,
subtreeSize);
/*
* pre-processing subtrees
*
* Description:
* 1. Order subtrees by levels
* 2. Find amount of GPU memory needed for batching
* 3. Find batching cutoff (up to what level to batch over supernodes)
*/
//memset (LpxSub, -1, sizeof(Int) * L->nsuper);
int numThreads = Common->ompNumThreads;
#pragma omp parallel for num_threads(numThreads)
for (s = 0; s < L->nsuper; s++)
{
LpxSub[s] = -1;
}
gb_p->ScoreSizeFactorized = sizeof(struct cholmod_descendant_score_t) * L->n;
gb_p->MapSizeFactorized = sizeof(Int) * L->n;
gb_p->LxSizeFactorized = 0;
/* loop over subtrees */
for(subtree = 0; subtree < gb_p->numSubtree; subtree++) {
numSuper = supernode_subtree_ptrs[subtree+1] - supernode_subtree_ptrs[subtree]; /* get # of supernodes in subtree */
/* copy # childrens per supernode */
memcpy(supernode_children_num, supernode_children_num2, L->nsuper*sizeof(Int));
level_num_desc_ptrs[subtree] = counts[0];
level_descendants_ptrs[subtree] = counts[1];
supernode_levels_subtree_ptrs[subtree] = counts[2];
/* get # children in root supernode of root tree */
TEMPLATE2 (CHOLMOD (get_children_root))( Common,
gb_p,
tree_p,
numSuper,
subtree);
/* get size of factor (Lx) in current subtree */
TEMPLATE2 (CHOLMOD (get_factor_size))( gb_p,
cpu_p,
tree_p,
numSuper,
subtree,
&max_factor_size,
LpxSub);
/* get current subtree size/info */
TEMPLATE2 (CHOLMOD (process_subtree))( Common,
A,
L,
gb_p,
cpu_p,
tree_p,
n,
numSuper,
subtree,
max_factor_size,
counts);
} /* end loop over subtrees */
/*
* find amount GPU memory needed for subtreeing
*
* Description:
* calculates total amount of GPU memory needed for current BRANCH_SIZE.
* If larger then reduce subtree size, if smaller increase subtree size.
*/
nls = Lpi[L->nsuper] - Lpi[0];
gb_p->LxSize = (max_factor_size)*sizeof(double); /* size of factor */
gb_p->CSize = (gb_p->maxCsize)*sizeof(double); /* size of C buffer */
gb_p->LsSize = (nls+1)*sizeof(Int);
gb_p->MapSize = (n+1)*sizeof(Int)*(gb_p->maxbatch); /* size of Map */
gb_p->ApSize = (A->ncol+1)*sizeof(Int);
gb_p->AiSize = A->nzmax*sizeof(Int);
gb_p->AxSize = A->nzmax*sizeof(double);
gb_p->dimDescSize = (gb_p->maxndesc)*sizeof(Int); /* size of dimension arrays for desc. */
gb_p->ptrDescSize = (gb_p->maxndesc)*sizeof(double *); /* size of pointer arrays for desc. */
gb_p->dimSuperSize = sizeof(Int)*(gb_p->maxbatch); /* size of dimension arrays for super. */
gb_p->ptrSuperSize = sizeof(double *)*(gb_p->maxbatch); /* size of pointer arrays for super. */
/* size of Ap, Ai, Ax buffers (0 if GPU subtrees not used) */
if(gb_p->runType != 1 && gb_p->runType != 3) size_A = gb_p->ApSize + gb_p->AiSize + gb_p->AxSize; /* if not root and not CPU only */
else size_A = 0;
/* total amount of GPU memory needed */
gpu_memtot = IBUFF_LOOPSIZE * (gb_p->LxSizeFactorized + MAP_CACHESIZE * gb_p->MapSizeFactorized)
+ gb_p->LxSize + gb_p->CSize + gb_p->LsSize + gb_p->MapSize + size_A + (14*(gb_p->dimDescSize) + 6*(gb_p->ptrDescSize) + 13*(gb_p->dimSuperSize) + 3*(gb_p->ptrSuperSize))
+ 2*(gb_p->maxbatch)*sizeof(Int) + sizeof(Int);
/* total amount of CPU memory needed (pinned memory) */
cpu_memtot = IBUFF_LOOPSIZE * gb_p->LxSizeFactorized + gb_p->ScoreSizeFactorized
+ gb_p->LxSize + (14*(gb_p->dimDescSize) + 6*(gb_p->ptrDescSize) + 13*(gb_p->dimSuperSize) + 3*(gb_p->ptrSuperSize));
/* print memory info */
PRINTFV("binary step: %d\n",search);
PRINTFV("\trunType: %ld \n", gb_p->runType);
PRINTFV("\tA->nzmax: %ld \n", A->nzmax);
PRINTFV("\tLxSize: %ld \n", gb_p->LxSize);
PRINTFV("\tCSize: %ld \n", gb_p->CSize);
PRINTFV("\tMapSize: %ld \n", gb_p->MapSize);
PRINTFV("\tLsSize: %ld \n", gb_p->LsSize);
PRINTFV("\tApSize: %ld \n", gb_p->ApSize);
PRINTFV("\tAiSize: %ld \n", gb_p->AiSize);
PRINTFV("\tAxSize: %ld \n", gb_p->AxSize);
PRINTFV("\tbatch lists: %ld \n", (14*(gb_p->dimDescSize) + 6*(gb_p->ptrDescSize) + 13*(gb_p->dimSuperSize) + 3*(gb_p->ptrSuperSize)) + 2*(gb_p->maxbatch)*sizeof(Int));
PRINTFV("\tcpu_mem_available: %ld \n",Common->dev_mempool_size);
PRINTFV("\tcpu_mem_used: %ld \n",cpu_memtot);
PRINTFV("\tgpu_mem_available: %ld \n",Common->dev_mempool_size);
PRINTFV("\tgpu_mem_used: %ld \n",gpu_memtot);
PRINTF("\n\n");
/*
* Update size of subtree.
*
* Update subtreeSize by subtreeSizeDiff amount, where subtreeSizeDiff is half the difference
* between the previous and current size. Increase if the subtreeSize is smaller than the available
* GPU (or CPU) memory, and decrease otherwise. Also store the current subtree size as subtreeSizePrev.
*/
/* Subtree size change to update. Half the difference between the previous and current subtree size. */
subtreeSizeDiff = (Int)((float)(labs(subtreeSize - subtreeSizePrev))/2.0 + 0.5);
/* Do not let it exceed half the subtree size. */
if ( subtreeSizeDiff > (subtreeSize)/2) {
subtreeSizeDiff = (subtreeSize)/2 ;
}
/* store previous subtree size */
subtreeSizePrev = subtreeSize;
/* update size of subtree */
/* case if exceed GPU or CPU memory, reduce subtree size */
if (gpu_memtot > Common->dev_mempool_size || cpu_memtot > Common->dev_mempool_size) {
subtreeSize -= subtreeSizeDiff;
}
/* case if not exceed GPU nor CPU memory, increase subtree size */
else {
subtreeSize += subtreeSizeDiff;
}
/* break conditions for exiting binary search loop:
* 1. BINARY_SEARCH steps reached
* 2. GPU mem does not exceed limit
* 3. if subtree size converges (BRANCH_SIZE_DIFF == 0)
* 4. if subtree size reaches size of factor or size of factor - 1 (depends on SINGLE_BRANCH)
* 5. if root_only, subtree defaults to only root algorithm
* 6. if CPU_only
*/
if(((gpu_memtot < Common->dev_mempool_size && cpu_memtot < Common->dev_mempool_size) &&
(search >= binarySearch || !(subtreeSizeDiff) || subtreeSize >= L->xsize)) || (gb_p->runType == 1) || (gb_p->runType == 3))
{
break;
}
/* increment binary step */
search++;
} /* end binary search loop*/
}
/*
* Function:
* loadbalance_gpu
*
* Description:
* Load balances subtrees on multiple devices. Four cases:
* 1. CPU only: sends all subtrees to CPU device (with id CHOLMOD_DEVICE_GPU)
* 2. root only: sends all subtrees to root (with id CHOLMOD_DEVICE_GPU+1)
* 3. GPU only: sends subtrees to GPU device (id from 0 to CHOLMOD_DEVICE_GPU-1) & root (id CHOLMOD_DEVICE_GPU+1)
* 4. hybrid: sends subtrees to GPU device (id from 0 to CHOLMOD_DEVICE_GPU-1), CPU device (id CHOLMOD_DEVICE_GPU) & root (id CHOLMOD_DEVICE_GPU+1)
*
* The load-balancing algorithm computes the total work on each device (runtime of all subtrees computed as flop/flops). Then it assigns each subtree
* to the device with least amount of work in a cyclic fashion.
*/
/*
 * Assigns every subtree to a device (GPUs 0..numGPU_physical-1, the CPU
 * device numGPU_physical, or the root algorithm numGPU_physical+1) by a
 * greedy longest-processing-time rule: subtrees are sorted by flop count
 * and each is given to the device with the least accumulated estimated
 * runtime (flop / theoretical peak flops of that device).
 *
 * Fix: the device-property holder was declared `typedef struct props
 * {...};` — a typedef without a name, which compilers diagnose and which
 * contributed nothing; it is now a plain struct declaration (the code
 * below always referred to it as `struct props`).
 */
void TEMPLATE2 (CHOLMOD (loadbalance_gpu))
(
    cholmod_common *Common,
    cholmod_global_pointers *gb_p,
    cholmod_tree_pointers *tree_p,
    cholmod_loadbalance_pointers *lb_p
)
{
    /* device properties feeding the peak-flops estimate */
    struct props
    {
        int clockRate;  /* clock rate in kHz */
        int sm;         /* # SMs (GPU) or cores (CPU) */
        int ipc;        /* double-precision instructions per cycle */
    };

    /* local variables */
    double GPUflops, CPUflops, flop, GPUtime, CPUtime;
    double *subtreeSize, *supernode_flop, *workPerDevice;
    Int i, j;
    int runType, numDevice, numSubtree, numSubtreeProper;
    Int s;
    Int *supernode_subtree, *supernode_subtree_ptrs, *numSubtreePerDevice, *listSubtreePerDevice;
    struct props gpu, cpu;
    struct cholmod_subtree_order_t *subtreeReorder;

    /*
     * Set variables & pointers
     */
    /* set variables */
    runType = gb_p->runType;
    numSubtree = gb_p->numSubtree;
    numSubtreeProper = gb_p->numSubtreeProper;

    /* set load-balance pointers */
    /* NOTE(review): subtreeReorder[i].size, numSubtreePerDevice and
     * workPerDevice are accumulated with += / ++ below, so they are
     * assumed to arrive zero-initialized — confirm at the allocation site. */
    subtreeSize = lb_p->subtreeSize;
    numSubtreePerDevice = lb_p->numSubtreePerDevice;
    listSubtreePerDevice = lb_p->listSubtreePerDevice;
    subtreeReorder = lb_p->subtreeReorder;
    workPerDevice = lb_p->workPerDevice;

    /* set tree */
    supernode_flop = tree_p->supernode_flop;
    supernode_subtree = tree_p->supernode_subtree;
    supernode_subtree_ptrs = tree_p->supernode_subtree_ptrs;

    /* issue less GPUs if not sufficient subtrees */
    gb_p->numGPU = Common->numGPU_physical;

    /* get number of devices to use:
     * 1. GPU only: Common->numGPU_physical
     * 2. hybrid: Common->numGPU_physical+1
     */
    if(runType == 1) numDevice = 1;                              /* CPU only */
    else if(runType == 2) numDevice = Common->numGPU_physical;   /* GPU only */
    else numDevice = Common->numGPU_physical + 1;                /* GPU + CPU (hybrid) */

    /* compute theoretical GPU & CPU flops */
    TEMPLATE2(CHOLMOD(query_gpu)) (&(gpu.clockRate), &(gpu.sm), &(gpu.ipc), 0);
    TEMPLATE2(CHOLMOD(query_cpu)) (&(cpu.clockRate), &(cpu.sm), &(cpu.ipc), Common->ompNumThreads);
    GPUflops = (double)(gpu.clockRate*gpu.sm*gpu.ipc)/(double)(1.0e+6);  /* GPU peak theoretical performance (in gflops) */
    CPUflops = (double)(cpu.clockRate*cpu.sm*cpu.ipc)/(double)(1.0e+6);  /* CPU peak theoretical performance (in gflops) */
    PRINTFV("GPU peak flops rate: %f\n",GPUflops);
    PRINTFV("CPU peak flops rate: %f\n",CPUflops);

    /* Store subtree info (size and id):
     * computes the cumulative number of floating-point operations (flop) in each subtree.
     */
    for(i = 0; i < numSubtree; i++)
    {
        subtreeReorder[i].id = i;   /* subtree id */
        Int numSuper = supernode_subtree_ptrs[i+1] - supernode_subtree_ptrs[i];
        /* loop over supernodes in subtree */
        for(j = 0; j < numSuper; j++) {
            s = supernode_subtree[supernode_subtree_ptrs[i] + j];
            subtreeReorder[i].size += supernode_flop[s];  /* subtree size (# flop in all its supernodes) */
        }
        /* convert to gflop */
        subtreeReorder[i].size *= 1.0e-9;
        subtreeSize[i] = subtreeReorder[i].size;
    }

    /* reorder subtrees by size (largest to smallest number of flop) */
    qsort(subtreeReorder, numSubtree, sizeof(struct cholmod_subtree_order_t), CHOLMOD(subtree_comp));

    /* Set subtrees for each device
     * Finds the device with least work and then adds current subtree to it.
     * The amount of work in the device (workPerDevice) is computed as the total runtime (flop/flop rate)
     * of all the subtrees in the device. The flop rate is the theoretical peak flops of the device (GPU or CPU).
     */
    PRINTF("\nSubtree Info:\n");
    /* loop over subtrees */
    for(i = 0; i < numSubtree; i++)
    {
        int minDevice = 0;
        double min, size;

        /* set initial device */
        min = workPerDevice[0];

        /* case CPU device (CPU only) */
        if(runType == 1)
        {
            minDevice = Common->numGPU_physical;      /* set CPU device */
        }
        /* case root (last subtree) */
        else if(subtreeReorder[i].id == numSubtreeProper)
        {
            minDevice = Common->numGPU_physical + 1;  /* set root */
        }
        /* case GPU or CPU device (GPU only or hybrid) */
        else
        {
            /* find device with least work */
            for(j = 1; j < numDevice; j++) {
                if(min > workPerDevice[j]) {
                    min = workPerDevice[j];
                    minDevice = j;                    /* set GPU or CPU device */
                }
            }
        }

        /* compute size (execution time) for subtree */
        flop = subtreeReorder[i].size;  /* floating-point operations in subtree */
        GPUtime = flop/GPUflops;        /* GPU runtime */
        CPUtime = flop/CPUflops;        /* CPU runtime */
        if(minDevice == Common->numGPU_physical) size = CPUtime;
        else size = GPUtime;

        /* print subtree info */
        PRINTFV("device:%d ",minDevice);
        PRINTFV("subtree:%d ",subtreeReorder[i].id);
        PRINTFV("workPerDevice:%f ",workPerDevice[minDevice]);
        PRINTFV("subtreeSize:%f ",subtreeReorder[i].size);
        PRINTFV("GPU time:%f ",GPUtime);
        PRINTFV("CPU time:%f\n",CPUtime);

        /* set subtree for selected device (GPU,CPU,root) */
        listSubtreePerDevice[(numSubtreePerDevice[minDevice]++) + minDevice*numSubtree] = (Int)(subtreeReorder[i].id);
        workPerDevice[minDevice] += size;
    } /* end loop over subtrees */
}
/*
* Function:
* initialize_gpu
*
* Description:
* initializes for GPU algorithm.
*
*/
/*
** Initializes every physical GPU for the subtree algorithm. One OpenMP
** thread per device calls cudaSetDevice and then gpu_init (pointer
** setup, memory copies). Skipped entirely when runType is 1 (CPU only)
** or 3 (root only), since no GPU subtrees will run. No-op without
** SUITESPARSE_CUDA.
*/
void TEMPLATE2 (CHOLMOD (initialize_gpu))
(
cholmod_common *Common,
cholmod_factor *L,
cholmod_sparse *A,
cholmod_global_pointers *gb_p,
cholmod_gpu_pointers *gpu_p,
cholmod_cpu_pointers *cpu_p
)
{
#ifdef SUITESPARSE_CUDA
int i, runType, numGPU_physical;
Int s;
/* set variables */
runType = gb_p->runType;
numGPU_physical = gb_p->numGPU;
/* initialize GPU (set pointers, copy memory, etc.)
* only if there are GPU subtrees */
if(runType != 1 && runType != 3) {
int gpuid;
/* one thread per device; gpuid doubles as loop index and device id */
#pragma omp parallel for num_threads(numGPU_physical) private(gpuid)
for (gpuid = 0; gpuid < numGPU_physical; gpuid++)
{
/* get GPU id (omp thread id) */
//int gpuid = omp_get_thread_num();
/* set GPU device */
cudaSetDevice(gpuid);
/* initialize GPU (set pointers, copy memory, etc.) */
TEMPLATE2 (CHOLMOD (gpu_init))( Common,
L,
A,
gb_p,
gpu_p,
gpuid);
}
}
/* Ensure that all GPU initializations are complete */
cudaDeviceSynchronize();
#endif
}
/*
* Function:
* initialize_cpu
*
* Description:
* initializes for root and CPU algorithm.
*
*/
/*
 * Initializes workspace for the root and CPU algorithms: clears factor
 * entries of the supernodes handled by the root algorithm and allocates
 * the C and Map buffers used by factorize_cpu_parallel. On allocation
 * failure all previously acquired workspace is released and the function
 * returns early; the error is reported through Common->status (set by
 * CHOLMOD(malloc)).
 *
 * Fixes: (1) the failure path freed gb_p->Iwork a second and third time
 * while leaking Cwork/Mapwork — it now frees the buffers it assigns to;
 * (2) `return (FALSE)` in this void function was invalid C — replaced by
 * a plain return.
 */
void TEMPLATE2 (CHOLMOD (initialize_cpu))
(
    cholmod_common *Common,
    cholmod_factor *L,
    cholmod_global_pointers *gb_p,
    cholmod_cpu_pointers *cpu_p,
    cholmod_tree_pointers *tree_p
)
{
    int i, runType, numSubtree, numSubtreeProper, numThreads;
    Int s;
    Int *supernode_subtree, *supernode_subtree_ptrs;
    size_t CSize, MapSize;

    /* set variables */
    runType = gb_p->runType;
    numSubtree = gb_p->numSubtree;
    numSubtreeProper = gb_p->numSubtreeProper;
    numThreads = Common->ompNumThreads;
    CSize = (gb_p->CSize);
    MapSize = (gb_p->MapSize);

    /* set tree pointers */
    supernode_subtree = tree_p->supernode_subtree;
    supernode_subtree_ptrs = tree_p->supernode_subtree_ptrs;

    /* set size of Cbuff: never smaller than the per-GPU device buffers */
    if(CSize < Common->numGPU_physical * Common->devBuffSize * Common->numGPU_parallel)
        CSize = (Common->numGPU_physical+1) * Common->devBuffSize * Common->numGPU_parallel;
    if(MapSize < (size_t)(Common->numGPU_physical * L->n*sizeof(Int)))
        MapSize = (size_t)(Common->numGPU_physical * L->n*sizeof(Int));

    /* clear Lx factor (supernodes used for root alg.) */
    /* NOTE(review): only the FIRST double of each supernode's block is
     * zeroed (memset length is sizeof(double)); confirm whether the whole
     * supernode was intended. Behavior preserved as-is. */
    Int *Lpx = L->px;
    #pragma omp parallel for num_threads(numThreads) private(i, s)
    for(i=supernode_subtree_ptrs[numSubtreeProper]; i<supernode_subtree_ptrs[numSubtree]; i++) {
        s = supernode_subtree[i];
        double *ps = (double *)&cpu_p->Lx[Lpx[s]];
        memset(ps, 0, sizeof(double));
    }

    /* allocate memory for Cbuff & Map (for factorize_cpu_parallel) */
    /* if (runType != 3 && runType != 2) */
    {
        gb_p->CworkSize = (CSize + sizeof(double) - 1)/sizeof(double);
        gb_p->MapworkSize = (MapSize + sizeof(Int) - 1)/sizeof(Int);

        /* allocate workspace */
        gb_p->Cwork = CHOLMOD(malloc) (gb_p->CworkSize, sizeof (double), Common) ;
        gb_p->Mapwork = CHOLMOD(malloc) (gb_p->MapworkSize, sizeof (Int), Common) ;

        /* check if enough memory */
        if (Common->status < CHOLMOD_OK)
        {
            /* release everything acquired so far (each free returns NULL) */
            gb_p->Iwork = CHOLMOD(free) (gb_p->IworkSize, sizeof (Int), gb_p->Iwork, Common) ;
            gb_p->Xwork = CHOLMOD(free) (gb_p->XworkSize, sizeof (double), gb_p->Xwork, Common) ;
            gb_p->Bwork = CHOLMOD(free) (gb_p->BworkSize, sizeof (struct cholmod_subtree_order_t), gb_p->Bwork, Common) ;
            /* bug fix: previously freed gb_p->Iwork again (double free)
             * instead of the buffers assigned here */
            gb_p->Cwork = CHOLMOD(free) (gb_p->CworkSize, sizeof (double), gb_p->Cwork, Common) ;
            gb_p->Mapwork = CHOLMOD(free) (gb_p->MapworkSize, sizeof (Int), gb_p->Mapwork, Common) ;
            return ;
        }
        cpu_p->C = gb_p->Cwork;
        cpu_p->Map = gb_p->Mapwork;
    }
}
/*
* Function:
* build_tree
*
* Description:
* builds initial elimination tree
*
*/
/*
** Parameters:
**   Common - CHOLMOD control (used by gpu_num_descendants)
**   L      - supernodal symbolic factor (nsuper)
**   gb_p   - globals updated here: maxnsrow, maxnscol, numRoot
**   cpu_p  - host pointers: Super, SuperMap, Lpi, Ls, Head, Next, Lpos
**   tree_p - output tree arrays: per-supernode parent, sizes, children
**            lists, flop counts
** First pass walks each supernode's descendant list (Head/Next chains,
** advanced via Lpos) collecting sizes, parents and flop estimates;
** second pass lays out the children adjacency lists and records roots.
*/
void TEMPLATE2 (CHOLMOD (build_tree))
(
cholmod_common *Common,
cholmod_factor *L,
cholmod_global_pointers *gb_p,
cholmod_cpu_pointers *cpu_p,
cholmod_tree_pointers *tree_p
)
{
/* local variables */
Int s, k1, k2, nscol, nsrow, psi, psend, ndcol, ndrow, ndrow1, ndrow2, pdx1, pdi1,
d, dlarge, kd1, kd2, pdi, pdend, pdi2, dancestor, sparent, id, totdesc, idescendant;
Int *Super, *SuperMap, *Lpi, *Ls, *Head, *Next, *Lpos, *supernode_root, *supernode_children,
*supernode_children_count, *supernode_children_num, *supernode_children_ptrs,
*supernode_parent, *supernode_size, *supernode_size_desc, *ndescendants;
Int childrenPtrs = 0, count;
double syrkflops,gemmflops,potrfflops,trsmflops;
double *supernode_flop;
/* set host pointers */
Super = cpu_p->Super;
SuperMap = cpu_p->SuperMap;
Lpi = cpu_p->Lpi;
Ls = cpu_p->Ls;
Head = cpu_p->Head;
Next = cpu_p->Next;
Lpos = cpu_p->Lpos;
/* set tree pointers */
supernode_root = tree_p->supernode_root;
supernode_children = tree_p->supernode_children;
supernode_children_num = tree_p->supernode_children_num;
supernode_children_count = tree_p->supernode_children_count;
supernode_children_ptrs = tree_p->supernode_children_ptrs;
supernode_parent = tree_p->supernode_parent;
supernode_size = tree_p->supernode_size;
supernode_size_desc = tree_p->supernode_size_desc;
ndescendants = tree_p->ndescendants;
supernode_flop = tree_p->supernode_flop;
/*
* Get info of tree:
* Gathers info of the tree. Visit all
* supernoeds and collects three things:
* 1. size
* 2. parent
* 3. # children
*
*/
/* loop over supernodes */
for(s = 0; s < L->nsuper; s++) {
/* clear variables */
totdesc=0;
idescendant=0;
syrkflops = 0.0;
gemmflops = 0.0;
potrfflops = 0.0;
trsmflops = 0.0;
/* get supernode dimensions */
k1 = Super[s] ;
k2 = Super[s+1] ;
nscol = k2 - k1 ;
psi = Lpi[s] ;
psend = Lpi[s+1];
nsrow = psend - psi;
/* store maximum nsrow & nscol in tree*/
if(nsrow > gb_p->maxnsrow) gb_p->maxnsrow = nsrow;
if(nscol > gb_p->maxnscol) gb_p->maxnscol = nscol;
/* get number of descendants in supernode */
TEMPLATE2 (CHOLMOD (gpu_num_descendants))( Common, cpu_p, tree_p, s);
/* get current supernode */
dlarge = Head[s];
/* loop over descendants of supernode */
while(idescendant < ndescendants[s]) {
/* get current descendant */
d = dlarge;
dlarge = Next[dlarge];
/* increment descendant count */
idescendant++;
/* get descendant dimensions */
kd1 = Super [d] ;
kd2 = Super [d+1] ;
ndcol = kd2 - kd1 ;
pdi = Lpi [d] ;
pdend = Lpi [d+1] ;
ndrow = pdend - pdi ;
pdi1 = pdi + Lpos[d];
/* advance pdi2 past the rows of d that update supernode s */
for (pdi2 = pdi1 ; pdi2 < pdend && Ls [pdi2] < k2 ; pdi2++) ;
ndrow1 = pdi2 - pdi1 ;
ndrow2 = pdend - pdi1 ;
/* get next descendant */
Lpos [d] = pdi2 - pdi ;
if (Lpos [d] < ndrow) {
/* requeue d on the list of the next supernode it updates */
dancestor = SuperMap [Ls [pdi2]] ;
//#pragma omp critical (head_next)
{
Next [d] = Head [dancestor] ;
Head [dancestor] = d ;
}
}
/* cumulate total size of all descendants in current supernode */
totdesc += ndrow2*ndrow1;
/* compute syrk & gemm flops in current descendant */
syrkflops += (double)(ndrow1*ndrow1*ndcol);
gemmflops += (double)(2.0*(ndrow2-ndrow1)*ndrow1*ndcol);
} /* end loop over descendants */
/* compute potrf & trsm flops in current supernode */
potrfflops = (double)(nscol*nscol*nscol/3.0);
trsmflops = (double)((nsrow-nscol)*nscol*nscol);
/* get next supernode */
if(nsrow > nscol) {
Lpos [s] = nscol ;
sparent = SuperMap [Ls [psi + nscol]] ;
//#pragma omp critical (head_next)
{
Next [s] = Head [sparent] ;
Head [sparent] = s ;
}
}
Head [s] = EMPTY ;
/* store tree information */
supernode_size_desc[s] = totdesc; /* store total size of all descendants in current supernode */
supernode_size[s] += totdesc; /* store total size of current supernode */
supernode_flop[s] = syrkflops+gemmflops+potrfflops+trsmflops; /* store total flops in current supernode */
if(nsrow > nscol) { /* case if supernode has parent */
sparent = SuperMap[Ls [psi + nscol]] ;
supernode_size[sparent] += supernode_size[s]; /* add supernode's size to its parent */
supernode_parent[s] = sparent; /* store supernode's parent */
supernode_children_num[sparent]++; /* increment # of children of supernode's parent */
}
else { /* case if supernode has no parent */
supernode_parent[s] = EMPTY;
}
} /* end loop over supernodes */
/*
* Store children of tree:
* Builds elimination tree. Visits
* all supernodes and stores their
* children.
*/
/* loop over supernodes */
for(s = 0; s < L->nsuper; s++) {
sparent = supernode_parent[s];
/* NOTE(review): sparent > 0 appears to rely on parents always having a
* larger index than their children (so a real parent is never 0);
* confirm, since EMPTY would also fail this test. */
if(sparent > 0) { /* case if supernode has parent */
count = supernode_children_count[sparent]; /* get # children the supernode's parent has */
if(!count) { /* case if supernode does not have siblings (its parent has no other descendants) */
supernode_children_ptrs[sparent] = childrenPtrs; /* set children pointer to child */
}
/* store children info */
id = supernode_children_ptrs[sparent] + count; /* index to store child (# siblings) */
supernode_children[id] = s; /* store supernode as a child */
supernode_children_count[sparent]++; /* increment # siblings of supernode (or # children (descendants) parent has) */
if(!count)
childrenPtrs += supernode_children_num[sparent]; /* increment pointer to children */
}
else { /* case if supernode has no parent (it is the root of a tree) */
supernode_root[(gb_p->numRoot)++] = s; /* store roots of trees */
}
} /* end loop over supernodes */
}
/*
* Function:
* build_subtree
*
* Description:
* builds a subtree of the elimination tree
*
*/
void TEMPLATE2 (CHOLMOD (build_subtree))
(
cholmod_factor *L,
cholmod_global_pointers *gb_p,
cholmod_tree_pointers *tree_p,
Int subtreeSize
)
{
/* local variables */
Int i, j, s, id;
Int *supernode_root, *supernode_children, *supernode_children_ptrs, *supernode_children_num, *supernode_children_count2,
*supernode_parent, *supernode_subtree, *supernode_subtree_ptrs, *supernode_size;
int subtree, first, numRoot, runType;
/* set variables */
j = 0;                                 /* running write index into supernode_subtree */
gb_p->numSubtree = 0;
numRoot = gb_p->numRoot;
runType = gb_p->runType;               /* 1 = CPU only, 3 = root only (both skip GPU subtrees) */
/* set tree pointers */
supernode_root = tree_p->supernode_root;
supernode_children = tree_p->supernode_children;
supernode_children_ptrs = tree_p->supernode_children_ptrs;
supernode_children_num = tree_p->supernode_children_num;
supernode_children_count2 = tree_p->supernode_children_count2;
supernode_parent = tree_p->supernode_parent;
supernode_subtree = tree_p->supernode_subtree;
supernode_subtree_ptrs = tree_p->supernode_subtree_ptrs;
supernode_size = tree_p->supernode_size;
/*
 * Build subtrees of tree:
 *
 * traverse tree and store supernodes in
 * corresponding subtrees. Use depth-first
 * traversal.
 * Steps:
 *   1. start from root supernode (there can be
 *      multiple roots)
 *
 *   2. descend tree until base (leaves) of tree reached
 *      (where supernodes have no children)
 *
 *   3. add supernode to subtree (subtree) if:
 *      a. supernode size < subtree size
 *      b. supernode has no children, or all
 *         children have already been visited
 *
 *   4. ascend tree if:
 *      a. supernode has no children
 *      b. all children visited
 *
 * Use variable 'first' to determine when a
 * new subtree starts. Set 'first' to head
 * (root) of subtree (subtree) and stop when
 * 's' =='first', which means we've returned
 * to the head supernode of the subtree.
 * Increment the # subtrees whenever this happens.
 * */
/* loop over all roots (trees) */
for(i=0; i<numRoot; i++) {
s = tree_p->supernode_root[i]; /* set root of tree */
if (tree_p->factorized[s]) continue; /* skip trees already factorized in an earlier pass */
first = 0;
/* NOTE(review): 'first' uses 0 as the "no subtree in progress" sentinel, so a
 * subtree whose stored 'first' (the parent of the subtree head) is supernode 0
 * would be conflated with the sentinel — presumably ruled out upstream; verify. */
/* loop: traverse (depth-first) through supernodes of current tree */
while(1) {
/* if returned to first supernode in subtree (exit condition) */
if(first == s) {
first = 0;
}
/* case: store supernodes in subtree only if:
 * 1. size of supernode is smaller than size of subtree
 *    note that supernode_size contains size of all its descendants (children)
 * 2. there are at least SUPERNODE_MIN supernodes in the elimination tree (root_only = 0)
 * 3. Ai,Ap,Ax are small enough to fit device (root_only = 0)
 */
if(supernode_size[s] <= subtreeSize && (runType != 3) && (runType != 1)) {
/* case: if first supernode in subtree (root of subtree) */
if(first == 0) {
first = supernode_parent[s]; /* store first supernode */
supernode_subtree_ptrs[(gb_p->numSubtree)++] = j; /* set pointer to current subtree */
}
/* case: if supernode has no children */
if(supernode_children_count2[s]==0) {
supernode_subtree[j++] = s; /* store supernode into subtree */
}
}
/* case: descend to next child (traverse down tree)
 * if supernode has children that haven't been added to the subtree
 */
if(supernode_children_count2[s] < supernode_children_num[s]) {
id = supernode_children_ptrs[s] + supernode_children_count2[s]; /* get id of children of the supernode */
supernode_children_count2[s]++; /* increment children count */
s = supernode_children[id]; /* get id of the child (descendant) */
}
/* case: ascend to parent (traverse up tree)
 * if supernode has no children or all children have been added to subtree
 * and supernode is not a root (since roots have no parents..)
 */
else if (s != supernode_root[i]){
s = supernode_parent[s]; /* get id of the parent */
}
/* exit if root of tree reached */
if(s == supernode_root[i] && supernode_children_count2[s] == supernode_children_num[s]) {
break;
}
} /* end loop to traverse tree */
} /* end loop over trees */
/*
 * Build last (root) subtree of tree:
 *
 * store supernodes that do not fit GPU ( > subtree size)
 * into last subtree (root subtree). These supernodes are
 * typically located at the top of the tree.
 */
gb_p->has_root = FALSE;
supernode_subtree_ptrs[(gb_p->numSubtree)] = j; /* set pointer to last subtree */
gb_p->numSubtreeProper = gb_p->numSubtree;
for(s=0; s < L->nsuper; s++) { /* loop over supernodes */
/* case if size of candidate subtree > cutoff subtree size (or a CPU-only / root-only run) */
if(supernode_size[s] > subtreeSize || (runType == 3) || (runType == 1)) {
gb_p->has_root = TRUE;
supernode_subtree[j++] = s; /* store supernode in subtree */
}
} /* end loop over supernodes */
/* set pointer for end of last subtree */
if (gb_p->has_root == TRUE)
supernode_subtree_ptrs[++(gb_p->numSubtree)] = j;
}
/*
* Function:
* get_children_root
*
* Description:
* stores # children on root supernodes for last (root) subtree
* builds a subtree of the elimination tree
*
*/
void TEMPLATE2 (CHOLMOD (get_children_root))
(
cholmod_common *Common,
cholmod_global_pointers *gb_p,
cholmod_tree_pointers *tree_p,
Int numSuper,
Int subtree
)
{
/* local variables */
Int i, j, k, s;
Int *supernode_children, *supernode_children_ptrs, *supernode_subtree, *supernode_subtree_ptrs, *supernode_children_num;
int num, child, numSubtree, numSubtreeProper, numThreads;
/* set variables */
numSubtree = gb_p->numSubtree;          /* NOTE(review): read but unused below */
numSubtreeProper = gb_p->numSubtreeProper;
numThreads = Common->ompNumThreads;
/* set tree pointers */
supernode_children = tree_p->supernode_children;
supernode_children_ptrs = tree_p->supernode_children_ptrs;
supernode_subtree = tree_p->supernode_subtree;
supernode_subtree_ptrs = tree_p->supernode_subtree_ptrs;
supernode_children_num = tree_p->supernode_children_num;
/*
 * Get children on root supernodes:
 * get # of children on root supernodes for root (last) subtree.
 * Overwrites supernode_children_num[s] with the count of children of s
 * that are themselves members of the last subtree.
 *
 * Parallel-safety: each iteration owns a distinct s, and only that
 * iteration writes supernode_children_num[s]; reads are of the lists
 * built earlier, so iterations are independent.
 *
 * Complexity: O(numSuper * children * numSuper) — the membership test is a
 * linear scan over the subtree for every child.
 */
/* case if root (last) subtree */
if(subtree == numSubtreeProper) {
/* loop over supernodes */
#pragma omp parallel for num_threads(numThreads) private (i, j, k, s, child, num)
for(i = 0; i < numSuper; i++) {
/* get supernode id */
s = supernode_subtree[supernode_subtree_ptrs[subtree] + i];
num = 0;
/* loop over children of supernode */
for(j = 0; j < supernode_children_num[s]; j++) {
/* get children id */
child = supernode_children[supernode_children_ptrs[s] + j];
/* loop over supernodes in last subtree */
for(k=0; k < numSuper; k++) {
if(child == supernode_subtree[supernode_subtree_ptrs[subtree] + k]) { /* case: is it a child? */
num++;
}
} /* end loop over supernodes */
} /* end loop over children*/
supernode_children_num[s] = num; /* store # children supernode has in last subtree */
} /* end supernode loop */
}
}
/*
* Function:
* process_subtree
*
* Description:
* processes a subtree of the elimination tree. Stores supernodes
* in levels, and finds the maximum batch size for each level,
* given a fixed amount of device memory.
*
*/
void TEMPLATE2 (CHOLMOD (process_subtree))
(
cholmod_common *Common,
cholmod_sparse *A,
cholmod_factor *L,
cholmod_global_pointers *gb_p,
cholmod_cpu_pointers *cpu_p,
cholmod_tree_pointers *tree_p,
Int n,
Int numSuper,
Int subtree,
Int max_factor_size,
Int *counts)
{
/* local variables */
Int batchdescflag, desc, count0, count1, count2, nsupernodes, stream, batch, Csize, ndesc,
maxsubtreeCsize, maxsubtreendesc, maxsubtreebatch, maxnumdescendantsperlevel, nbatch,
maxsubtreeCsize_prev, maxsubtreendesc_prev, maxsubtreebatch_prev, maxnumdescendantsperlevel_prev, nbatch_prev, runType;
Int s, i, processed_nodes, node, num_levels, sparent;
Int *Lpi, *supernode_levels, *supernode_levels_ptrs, *supernode_levels_subtree_ptrs, *supernode_subtree, *supernode_subtree_ptrs,
*level_descendants, *supernode_children_num, *supernode_parent, *supernode_size_desc, *supernode_num_levels,
*level_num_desc, *supernode_batch, *ndescendants;
size_t nls, LxSize, CSize, LsSize, MapSize, ApSize, AiSize, AxSize, dimDescSize, ptrDescSize, dimSuperSize, ptrSuperSize,
gpu_memtot, gpu_memtot_prev;
size_t LxSizeFactorizedMax;
/* set variables */
processed_nodes = 0;
num_levels = 0;
/* running counters shared across calls; returned through counts[] at the end */
count0 = counts[0];
count1 = counts[1];
count2 = counts[2];
runType = gb_p->runType;
/* set host pointers */
Lpi = cpu_p->Lpi;
/* set tree pointers */
supernode_levels = tree_p->supernode_levels;
supernode_levels_ptrs = tree_p->supernode_levels_ptrs;
supernode_levels_subtree_ptrs = tree_p->supernode_levels_subtree_ptrs;
supernode_subtree = tree_p->supernode_subtree;
supernode_subtree_ptrs = tree_p->supernode_subtree_ptrs;
level_descendants = tree_p->level_descendants;
supernode_children_num = tree_p->supernode_children_num;
supernode_parent = tree_p->supernode_parent;
supernode_size_desc = tree_p->supernode_size_desc;
supernode_num_levels = tree_p->supernode_num_levels;
level_num_desc = tree_p->level_num_desc;
supernode_batch = tree_p->supernode_batch;
ndescendants = tree_p->ndescendants;
/*
 * Process subtree:
 * First store all supernodes within
 * levels. Then visit all supernodes
 * in a level and get the amount of
 * memory needed for batching them.
 */
/* pre-pass: count already-factorized supernodes as processed and detach them
 * from their parents so the level construction below ignores them */
for(i=0; i < numSuper; i++) {
s = supernode_subtree[supernode_subtree_ptrs[subtree] + i];
if (tree_p->factorized[s] > 0)
{
Int d, kd1, kd2, ndcol, pdi, pdend, pdi1, ndrow, ndrow2;
d = s;
kd1 = cpu_p->Super [d] ;
kd2 = cpu_p->Super [d+1] ;
ndcol = kd2 - kd1 ;
pdi = cpu_p->Lpi [d] ;
pdend = cpu_p->Lpi [d+1] ;
ndrow = pdend - pdi ;
pdi1 = pdi + cpu_p->Lpos[d];
ndrow2 = pdend - pdi1 ;
tree_p->parent_subtree[s] = subtree;
/* track the largest factorized-supernode block needed later as a staging buffer */
LxSizeFactorizedMax = sizeof(double) * ndcol * ndrow2;
if (gb_p->LxSizeFactorized < LxSizeFactorizedMax)
{
gb_p->LxSizeFactorized = LxSizeFactorizedMax;
}
}
if (tree_p->factorized[s])
{
/* normalize the factorized flag to +/-1 */
if (tree_p->factorized[s] > 0)
tree_p->factorized[s] = 1;
if (tree_p->factorized[s] < 0)
tree_p->factorized[s] = -1;
processed_nodes++; /* increment processed supernode counter */
supernode_children_num[s] = EMPTY; /* empty children of supernode */
sparent = supernode_parent[s];
if (sparent != EMPTY)
supernode_children_num[sparent]--;
}
}
/* loop over levels in subtree (until all supernodes are processed) */
while(processed_nodes != numSuper) {
/* reset variables */
nsupernodes = 0;
batchdescflag = 0;
desc = 0;
stream = 0;
batch = 0;
Csize = 0;
ndesc = 0;
maxsubtreeCsize = 0;
maxsubtreendesc = 0;
/* Store supernodes in current level:
 * This just involves selecting supernodes that have no
 * children (belong to the current level).
 */
/* pointer to levels in subtree */
supernode_levels_ptrs[supernode_levels_subtree_ptrs[subtree]+num_levels] = count2;
/* loop over supernodes */
for(i=0; i < numSuper; i++) {
s = supernode_subtree[supernode_subtree_ptrs[subtree] + i];
/* store supernodes that belong to current level */
if (tree_p->factorized[s] == 0 && supernode_children_num[s] == 0)
{ /* case supernode has no children (belongs to current level) */
supernode_levels[count2++] = s; /* store supernode in level */
nsupernodes++; /* increment # supernodes in level */
processed_nodes++; /* increment processed supernode counter */
}
} /* end loop over supernodes */
/*
 * Update supernodes:
 * Remove supernodes in current level
 * from their parent's children list.
 *
 */
/* loop over supernodes in level */
for(i = 0; i < nsupernodes; i++) {
node = supernode_levels[supernode_levels_ptrs[supernode_levels_subtree_ptrs[subtree]+num_levels]+i]; /* get supernode */
supernode_children_num[node] = EMPTY; /* empty children of supernode */
sparent = supernode_parent[node]; /* get parent of supernode*/
/* case if parent of supernode has children */
if(sparent != EMPTY) {
supernode_children_num[sparent]--; /* remove supernode as child of its parent (supernode is processed) */
}
/* get maximum # of descendants in a supernode in current level */
if(ndescendants[node] > desc) {
desc = ndescendants[node];
}
} /* end loop over supernodes */
/* store maximum # descendants a supernode has in current level */
level_descendants[count1++] = desc;
/*
 * Get batching info:
 *
 * Compute the amount of memory needed for batching
 * supernodes. That is, store three variables:
 *
 * 1. maxbatch:
 *    a. maximum batch size (of supernodes) in any given level
 *    b. size of buffers to store lists of supernode dimensions
 *
 * 2. maxndesc:
 *    a. maximum number of descendants in any batch
 *    b. size of buffers to store lists of descendant dimensions
 *
 * 3. maxCsize:
 *    a. maximum cumulative size of descendants in a batch
 *    b. size of buffer to store schur complements
 *
 * The algorithm below finds the optimal (largest) batch size (# supernodes)
 * to be used for each level.
 *
 * But only do this if the subtree is not the root subtree (the last top-of-tree
 * subtree).
 *
 */
maxnumdescendantsperlevel = 0;
nbatch = MAXBATCHSIZE;
/*
 * case if:
 * 1. one of GPU subtrees (not root subtree)
 * 2. not root only
 * 3. not CPU only
 */
if((subtree != gb_p->numSubtreeProper) && (runType != 3) && (runType != 1)) {
/* reset variables */
maxsubtreeCsize = gb_p->maxCsize;
maxsubtreendesc = gb_p->maxndesc;
maxsubtreebatch = gb_p->maxbatch;
maxnumdescendantsperlevel = 0;
gpu_memtot = 0;
gpu_memtot_prev = gpu_memtot;
nbatch = 1;
/* while loop to find batch size for current level:
 * grow nbatch by one until GPU memory would be exceeded, then back off to the
 * previous (known-good) values. nbatch_prev is only read when gpu_memtot_prev
 * is nonzero, i.e. from the second iteration on, so it is never used
 * uninitialized. */
while(1) {
/* reset variables */
Csize = 0;
ndesc = 0;
gpu_memtot_prev = gpu_memtot;
maxsubtreeCsize_prev = maxsubtreeCsize;
maxsubtreendesc_prev = maxsubtreendesc;
maxsubtreebatch_prev = maxsubtreebatch;
maxnumdescendantsperlevel_prev = maxnumdescendantsperlevel;
/* loop over supernodes in level */
for(i = 0; i < nsupernodes; i++) {
/* get supernode */
node = supernode_levels[supernode_levels_ptrs[supernode_levels_subtree_ptrs[subtree]+num_levels]+i];
/* reset variables (new batch) */
if( !(i % nbatch ) ) {
Csize = 0;
ndesc = 0;
}
Csize += supernode_size_desc[node]; /* add to total size of C buffer needed for storing schur complement in current batch of supernodes */
ndesc += ndescendants[node]; /* add to total # descendants in current batch of supernodes */
if(Csize > maxsubtreeCsize) maxsubtreeCsize = Csize; /* store maximum C buffer size in any given level */
if(ndesc > maxsubtreendesc) maxsubtreendesc = ndesc; /* store maximum # descendants in any given level */
if(nbatch > maxsubtreebatch) maxsubtreebatch = nbatch; /* store maximum batch size in any given level */
if(ndesc > maxnumdescendantsperlevel) maxnumdescendantsperlevel = ndesc;
} /* end loop over supernodes in level */
/* find amount of GPU memory needed for subtree processing */
nls = Lpi[L->nsuper] - Lpi[0];
LxSize = max_factor_size*sizeof(double); /* size of factor */
CSize = maxsubtreeCsize*sizeof(double); /* size of C buffer */
LsSize = (nls+1)*sizeof(Int);
MapSize = (n+1)*sizeof(Int)*(maxsubtreebatch); /* size of Map */
ApSize = (A->ncol+1)*sizeof(Int);
AiSize = A->nzmax*sizeof(Int);
AxSize = A->nzmax*sizeof(double);
dimDescSize = maxsubtreendesc*sizeof(Int); /* size of list of dimensions for descendants */
ptrDescSize = maxsubtreendesc*sizeof(double *); /* size of list of pointers for descendants */
dimSuperSize = sizeof(Int)*(maxsubtreebatch); /* size of list of dimensions for supernodes */
ptrSuperSize = sizeof(double *)*(maxsubtreebatch); /* size of list of pointers for supernodes */
/* compute total amount of GPU memory needed
 * NOTE(review): the multipliers (14, 6, 13, 3, IBUFF_LOOPSIZE, MAP_CACHESIZE)
 * mirror the number of device buffers allocated elsewhere — keep in sync */
gpu_memtot_prev = gpu_memtot;
gpu_memtot = IBUFF_LOOPSIZE * (gb_p->LxSizeFactorized + MAP_CACHESIZE * gb_p->MapSizeFactorized)
+ LxSize + CSize + LsSize + MapSize + ApSize + AiSize + AxSize
+ 14*dimDescSize + 6*ptrDescSize + 13*dimSuperSize + 3*ptrSuperSize
+ 2*nbatch*sizeof(Int) + sizeof(Int);
/* case if exceed GPU memory */
if(gpu_memtot >= Common->dev_mempool_size) {
/* store previous values */
if(gpu_memtot_prev) {
gpu_memtot = gpu_memtot_prev;
nbatch = nbatch_prev;
maxsubtreeCsize = maxsubtreeCsize_prev;
maxsubtreendesc = maxsubtreendesc_prev;
maxsubtreebatch = maxsubtreebatch_prev;
maxnumdescendantsperlevel = maxnumdescendantsperlevel_prev;
}
/* exit loop */
break;
}
/* case if reached largest batch size in level */
else if(nbatch == nsupernodes || nbatch >= MAXBATCHSIZE) {
/* exit loop */
break;
}
/* increment batch size */
nbatch_prev = nbatch;
nbatch += 1;
} /* end while loop */
/* store max variables */
if(maxsubtreeCsize > gb_p->maxCsize) gb_p->maxCsize = maxsubtreeCsize; /* maximum C buffer size in any given subtree */
if(maxsubtreendesc > gb_p->maxndesc) gb_p->maxndesc = maxsubtreendesc; /* maximum # descendants in any given subtree */
if(maxsubtreebatch > gb_p->maxbatch) gb_p->maxbatch = maxsubtreebatch; /* maximum batch size in any given subtree */
}
/*
 * case if:
 * 1. CPU only (runType 1) or root only (runType 3): no memory search,
 *    just accumulate the per-level maxima with a fixed batch size
 *
 */
else if (runType == 1 || runType == 3) {
maxnumdescendantsperlevel = 0;
nbatch = MAXBATCHSIZE;
/* loop over supernodes in level */
for(i = 0; i < nsupernodes; i++) {
/* get supernode */
node = supernode_levels[supernode_levels_ptrs[supernode_levels_subtree_ptrs[subtree]+num_levels]+i];
/* reset variables (new batch) */
if( !(i % nbatch ) ) {
Csize = 0;
ndesc = 0;
}
Csize += supernode_size_desc[node]; /* add to total size of C buffer needed for storing schur complement in current batch of supernodes */
ndesc += ndescendants[node]; /* add to total # descendants in current batch of supernodes */
if(Csize > gb_p->maxCsize) gb_p->maxCsize = Csize; /* store maximum C buffer size in any given level */
if(ndesc > gb_p->maxndesc) gb_p->maxndesc = ndesc; /* store maximum # descendants in any given level */
if(nbatch > gb_p->maxbatch) gb_p->maxbatch = nbatch; /* store maximum batch size in any given level */
if(ndesc > maxnumdescendantsperlevel) maxnumdescendantsperlevel = ndesc;
} /* end loop over supernodes in level */
}
supernode_batch[supernode_levels_subtree_ptrs[subtree]+num_levels] = nbatch; /* batch size per level */
level_num_desc[count0++] = maxnumdescendantsperlevel; /* total # descendants in current level */
/* increment level (only if the level actually received supernodes) */
if (supernode_levels_ptrs[supernode_levels_subtree_ptrs[subtree]+num_levels] < count2)
num_levels++;
/* store pointer to level */
supernode_levels_ptrs[supernode_levels_subtree_ptrs[subtree]+num_levels] = count2;
} /* end loop over levels */
/* store number of levels per subtree */
supernode_num_levels[subtree] = num_levels;
/* store counts (returned to the caller for the next subtree) */
counts[0] = count0;
counts[1] = count1;
counts[2] = count2;
}
/*
* Function:
* get_factor_size
*
* Description:
* computes the size of the subfactor of the subtree
*
*/
void TEMPLATE2 (CHOLMOD (get_factor_size))
(
cholmod_global_pointers *gb_p,
cholmod_cpu_pointers *cpu_p,
cholmod_tree_pointers *tree_p,
Int numSuper,
Int subtree,
Int *max_factor_size,
Int *LpxSub
)
{
/*
 * Compute the size of the sub-factor (LxSub) owned by one subtree and
 * record, per supernode, its offset into that sub-factor. Only subtrees
 * handled by the GPU-subtree algorithm (i.e. not the last/root subtree)
 * get a sub-factor. The largest sub-factor seen across all subtrees is
 * returned through max_factor_size.
 */
Int offset = 0 ;                               /* running sub-factor size */
Int *Super = cpu_p->Super ;                    /* supernode column ranges */
Int *Lpi = cpu_p->Lpi ;                        /* supernode row-pattern ranges */
Int *node_list = tree_p->supernode_subtree ;   /* supernodes grouped by subtree */
Int *node_ptrs = tree_p->supernode_subtree_ptrs ;
Int *factor_size = tree_p->factor_size ;
/* skip the last (root) subtree — it never gets a sub-factor */
if (subtree != gb_p->numSubtreeProper /*|| numSubtree == 1*/)
{
Int idx ;
/* walk the supernodes belonging to this subtree */
for (idx = 0 ; idx < numSuper ; idx++)
{
Int snode = node_list [node_ptrs [subtree] + idx] ;
if (tree_p->factorized [snode])
{
/* already factorized elsewhere: no slot in this sub-factor */
LpxSub [snode] = -1 ;
}
else
{
Int ncols = Super [snode+1] - Super [snode] ;
Int nrows = Lpi [snode+1] - Lpi [snode] ;
LpxSub [snode] = offset ;          /* pointer to supernode in sub-factor */
offset += ncols * nrows ;          /* advance past this supernode */
}
}
}
/* record this subtree's sub-factor size */
factor_size [subtree] = offset ;
/* track the largest sub-factor over all subtrees */
if (factor_size [subtree] > (*max_factor_size))
{
(*max_factor_size) = factor_size [subtree] ;
}
}
/*
* Function:
* gpu_num_descendants
*
* Description:
* finds # descendants in supernode
*
*/
void TEMPLATE2 (CHOLMOD (gpu_num_descendants))
(
cholmod_common *Common,
cholmod_cpu_pointers *cpu_p,
cholmod_tree_pointers *tree_p,
Int s
)
{
/*
 * Count the not-yet-factorized descendants of supernode s (by walking the
 * Head/Next descendant list) and store the count in tree_p->ndescendants[s].
 * If s itself is already factorized, ndescendants[s] is left untouched.
 */
Int d, n_descendant = 0;
(void) Common ;                 /* unused; kept for a uniform interface */
if (tree_p->factorized[s] != 0)
{
/* BUG FIX: was "return 0;" — invalid in a void function */
return;
}
/* walk the linked list of descendants of s */
d = cpu_p->Head[s];
while ( d != EMPTY )
{
/* only descendants not factorized in an earlier pass count */
if (tree_p->factorized[d] == 0)
n_descendant++;
d = cpu_p->Next[d];
}
tree_p->ndescendants[s] = n_descendant;
}
/*
#undef REAL
#undef COMPLEX
#undef ZOMPLEX
*/
|
run_encap_decap.c | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
/*
Encapsulate a secret and use the secret to encrypt a message
Decapsulate the secret and use the secret to decrypt the encrypted message
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <amcl/utils.h>
#include <amcl/randapi.h>
#include <amcl/bls_BLS381.h>
#include <oqs/oqs.h>
#include <pqnist/pqnist.h>
#define NTHREADS 8
#define MAXSIZE 256
#define G2LEN 4*BFS_BLS381
int main()
{
    int i,rc;

    // Seed value for CSPRNG
    char seed[PQNIST_SEED_LENGTH];
    octet SEED = {sizeof(seed),sizeof(seed),seed};

    // Seed value for key generation
    char seedkeys[NTHREADS][PQNIST_SEED_LENGTH];

    csprng RNG;

    // Initialization vector
    char iv[PQNIST_AES_IV_LENGTH];
    octet IV= {sizeof(iv),sizeof(iv),iv};

    // Message to be sent to Bob
    char p[NTHREADS][MAXSIZE];
    octet P[NTHREADS];

    // AES CBC ciphertext
    char c[NTHREADS][MAXSIZE];
    octet C[NTHREADS];

    // non random seed value (deterministic demo seed)
    for (i=0; i<PQNIST_SEED_LENGTH; i++) SEED.val[i]=i+1;
    printf("SEED: ");
    OCT_output(&SEED);
    printf("\n");

    // initialise random number generator
    CREATE_CSPRNG(&RNG,&SEED);

    // Initialise key generation seed
    for(i=0; i<NTHREADS; i++)
    {
        for(int j=0; j<PQNIST_SEED_LENGTH; j++)
        {
            seedkeys[i][j] = i;
        }
    }

    // Bob's SIKE keys
    uint8_t SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key];
    uint8_t SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key];

    // Alice's BLS keys (not used)
    char BLSpk[NTHREADS][G2LEN];
    char BLSsk[NTHREADS][BGS_BLS381];

    // FIX: rc must be private — it was implicitly shared, a data race between threads
    #pragma omp parallel for private(rc)
    for(i=0; i<NTHREADS; i++)
    {
        rc = pqnist_keys(seedkeys[i], SIKEpk[i], SIKEsk[i], BLSpk[i], BLSsk[i]);
        if (rc)
        {
            fprintf(stderr, "ERROR pqnist_keys rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
        int j = OQS_KEM_sike_p751_length_public_key;
        printf("Bob SIKE pklen %d pk: ", j);
        amcl_print_hex(SIKEpk[i], j);
        j = OQS_KEM_sike_p751_length_secret_key;
        printf("Bob SIKE sklen %d sk: ", j);
        amcl_print_hex(SIKEsk[i], j);
    }

    // Alice
    for(i=0; i<NTHREADS; i++)
    {
        // memset instead of the non-standard (removed in POSIX.1-2008) bzero
        memset(p[i], 0, sizeof(p[i]));
        P[i].max = MAXSIZE;
        P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i);
        P[i].val = p[i];
        // Pad message to a whole number of AES blocks (16 bytes)
        int l = 16 - (P[i].len % 16);
        if (l < 16)
        {
            OCT_jbyte(&P[i],0,l);
        }
    }

    // Random initialization value
    generateRandom(&RNG,&IV);
    printf("Alice IV: ");
    OCT_output(&IV);

    // Copy plaintext
    for(i=0; i<NTHREADS; i++)
    {
        C[i].val = c[i];
        C[i].max = MAXSIZE;
        OCT_copy(&C[i],&P[i]);
        printf("Alice Plaintext: ");
        OCT_output_string(&C[i]);
        printf("\n");
    }

    // SIKE encapsulated key
    uint8_t ek[NTHREADS][OQS_KEM_sike_p751_length_ciphertext];

    #pragma omp parallel for private(rc)
    for(i=0; i<NTHREADS; i++)
    {
        // Generate an AES key which is encapsulated using SIKE. Use this key to
        // AES encrypt the plaintext in C[i] (encrypted in place).
        rc = pqnist_encapsulate_encrypt(C[i].val, C[i].len, IV.val, SIKEpk[i], ek[i]);
        if(rc)
        {
            fprintf(stderr, "ERROR pqnist_encapsulate_encrypt rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
        printf("Alice ciphertext: ");
        OCT_output(&C[i]);
        printf("Alice ek %lu ek: ", sizeof(ek[i]));
        amcl_print_hex(ek[i], sizeof(ek[i]));
        printf("\n");
    }

    // Bob
    #pragma omp parallel for private(rc)
    for(i=0; i<NTHREADS; i++)
    {
        // Obtain encapsulated AES key and decrypt C (in place)
        rc = pqnist_decapsulate_decrypt(C[i].val, C[i].len, IV.val, SIKEsk[i], ek[i]);
        if(rc)
        {
            fprintf(stderr, "ERROR pqnist_decapsulate_decrypt rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
        printf("Bob Plaintext: ");
        OCT_output(&C[i]);
        printf("Bob Plaintext: ");
        OCT_output_string(&C[i]);
        printf("\n");

        // Compare sent and received message (OCT_comp returns 0 for failure)
        rc = OCT_comp(&P[i],&C[i]);
        if(!rc)
        {
            fprintf(stderr, "ERROR OCT_comp rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
    }

    // clear secret material from memory
    OCT_clear(&IV);
    for(i=0; i<NTHREADS; i++)
    {
        OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key);
        // BUG FIX: cleanse exactly the BLS secret-key buffer; the previous
        // OQS_SIG_picnic_L5_FS_length_secret_key constant belongs to a different
        // scheme and overran the BGS_BLS381-byte array
        OQS_MEM_cleanse(BLSsk[i], sizeof(BLSsk[i]));
        OCT_clear(&P[i]);
        OCT_clear(&C[i]);
    }
    KILL_CSPRNG(&RNG);
    exit(EXIT_SUCCESS);
}
|
GB_unop__abs_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_int16_int16
// op(A') function: GB_unop_tran__abs_int16_int16
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = GB_IABS (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__abs_int16_int16
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Apply z = |a| entrywise: Cx [k] = GB_IABS (Ax [k]) for each stored entry.
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only positions flagged in Ab carry entries.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                int16_t a = Ax [k] ;
                int16_t t = a ;
                Cx [k] = GB_IABS (t) ;
            }
        }
    }
    else
    {
        // dense/full case: every position holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            int16_t a = Ax [k] ;
            int16_t t = a ;
            Cx [k] = GB_IABS (t) ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__abs_int16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The shared transpose template expands here using the GB_CAST_OP /
    // GB_GETA / GB_CX macros defined above for this int16 abs operator.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
module_bl_mynn_mym_condensation_impl.h | #ifndef __MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_H__
#define __MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_H__
#ifndef MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_VERSION_MAJOR
#define MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_VERSION_MAJOR 1
#endif
#ifndef MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_VERSION_MINOR
#define MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_VERSION_MINOR 0
#endif
#ifndef MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_PATCH_VERSION
#define MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_PATCH_VERSION 0
#endif
#ifndef MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_CREATE_DATE
#define MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_CREATE_DATE "Date: 12-11-2016 , Time: 13:18 PM GMT+2"
#endif
#ifndef MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_BUILD_DATE
#define MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_BUILD_DATE ""
#endif
#ifndef MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_AUTHOR
#define MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_AUTHOR "Name: Bernard Gingold , e-mail: beniekg@gmail.com"
#endif
#include "module_bl_mynn_F90_iface.h"
#include "PhysLib_Config.h"
#include "std_headers.h"
namespace wrf_phys_wrappers {
namespace module_bl_mynn {
template<typename R32 = float,
typename I32 = int > struct Wrap_Mym_Condensation {
/************************************
Constructors and Destructors.
*************************************/
/*
@Purpose:
Default Constructor - explicitly default.
*/
Wrap_Mym_Condensation() = default;
/*
@Purpose:
1st 'main' Constructor which purpose
is to allocate and initialize scalar
and array members. Array members are
zero-filled. Caller must later initialize
input arrays to correct physical state.
*/
// Allocates the 16 aligned R32 work arrays (each m_kte elements) and
// zero-fills the index range [m_kts, m_kte).
// NOTE(review): elements [0, m_kts) are left uninitialized when m_kts > 0 —
// confirm callers only touch [m_kts, m_kte).
Wrap_Mym_Condensation(_In_ const I32 kts,
_In_ const I32 kte,
_In_ const I32 bl_mynn_cloudpdf,
_In_ const R32 dx,
_In_ const R32 PBLH1,
_In_ const R32 HFX1)
:
m_kts{ kts },
m_kte{ kte },
m_bl_mynn_cloudpdf{ bl_mynn_cloudpdf },
m_dx{ dx },
m_PBLH1{ PBLH1 },
m_HFX1{ HFX1 },
m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_p{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_exner{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_thl{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_th{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qw{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cov{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_Sh{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_el{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_edmf_qc1{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qc_bl1D{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cldfra_bl1D{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_vt{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_vq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) } {
// Fatal if the level range is inverted; process exits (no exceptions used here).
if (0 > (m_kte - m_kts)) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Invalid array size 1st Ctor: 'Wrap_Mym_Condensation'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Lower range value m_kts: " << m_kts << "\n";
std::cerr << "Upper range value m_kte: " << m_kte << "\n";
std::cerr << "Range value difference: " << m_kte - m_kts << "\n";
std::cerr << "Cannot recover --> calling exit(-1)!!\n";
std::exit(-1);
}
// Null-check every allocation.
// NOTE(review): treating the pointer members as the array (&m_dz)[0..m_totArrays)
// assumes the m_totArrays R32* members are laid out contiguously starting at
// m_dz; this is formally UB in standard C++ — verify it is intended/safe here.
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if ((&this->m_dz)[i] == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 1st Ctor: 'Wrap_Mym_Condensation'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_dz)[i] << "\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
}
// Zero-fill [m_kts, m_kte); OpenMP path only pays off for very large columns.
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
#pragma omp parallel for if(m_kte >= (1 << 16))
for (int i = m_kts; i != m_kte; ++i) {
m_dz[i] = 0.f;
m_p[i] = 0.f;
m_exner[i] = 0.f;
m_thl[i] = 0.f;
m_th[i] = 0.f;
m_qw[i] = 0.f;
m_tsq[i] = 0.f;
m_qsq[i] = 0.f;
m_cov[i] = 0.f;
m_Sh[i] = 0.f;
m_el[i] = 0.f;
m_edmf_qc1[i] = 0.f;
m_qc_bl1D[i] = 0.f;
m_cldfra_bl1D[i] = 0.f;
m_vt[i] = 0.f;
m_vq[i] = 0.f;
}
#else
// Serial fallback; hint the compiler to vectorize/unroll when enabled.
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
#endif
for (int i = m_kts; i != m_kte; ++i) {
m_dz[i] = 0.f;
m_p[i] = 0.f;
m_exner[i] = 0.f;
m_thl[i] = 0.f;
m_th[i] = 0.f;
m_qw[i] = 0.f;
m_tsq[i] = 0.f;
m_qsq[i] = 0.f;
m_cov[i] = 0.f;
m_Sh[i] = 0.f;
m_el[i] = 0.f;
m_edmf_qc1[i] = 0.f;
m_qc_bl1D[i] = 0.f;
m_cldfra_bl1D[i] = 0.f;
m_vt[i] = 0.f;
m_vq[i] = 0.f;
}
#endif
}
/*
@Purpose:
2nd 'main' Constructor which purpose
is to allocate and initialize scalar
and array members. Array input members are
copied from the caller's arrays; array output
members (m_qc_bl1D, m_cldfra_bl1D, m_vt, m_vq)
are zero-filled. Caller must pass initialized
input arrays to correct physical state.
Aborts via std::exit(-1) on invalid range,
failed allocation, or a NULL caller array.
*/
Wrap_Mym_Condensation(_In_ const I32 kts,
_In_ const I32 kte,
_In_ const I32 bl_mynn_cloudpdf,
_In_ const R32 dx,
_In_ const R32 PBLH1,
_In_ const R32 HFX1,
_In_ R32* __restrict const dz,
_In_ R32* __restrict const p,
_In_ R32* __restrict const exner,
_In_ R32* __restrict const thl,
_In_ R32* __restrict const th,
_In_ R32* __restrict const qw,
_In_ R32* __restrict const tsq,
_In_ R32* __restrict const qsq,
_In_ R32* __restrict const cov,
_In_ R32* __restrict const Sh,
_In_ R32* __restrict const el,
_In_ R32* __restrict const edmf_qc1)
:
m_kts{ kts },
m_kte{ kte },
m_bl_mynn_cloudpdf{ bl_mynn_cloudpdf },
m_dx{ dx },
m_PBLH1{ PBLH1 },
m_HFX1{ HFX1 },
m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_p{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_exner{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_thl{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_th{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qw{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cov{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_Sh{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_el{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_edmf_qc1{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qc_bl1D{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cldfra_bl1D{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_vt{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_vq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) } {
// Range sanity check: the level range [m_kts, m_kte) must be non-negative.
if (0 > (m_kte - m_kts)) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Invalid array size 2nd Ctor: 'Wrap_Mym_Condensation'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Lower range value m_kts: " << m_kts << "\n";
std::cerr << "Upper range value m_kte: " << m_kte << "\n";
std::cerr << "Range value difference: " << m_kte - m_kts << "\n";
std::cerr << "Cannot recover --> calling exit(-1)!!\n";
std::exit(-1);
}
// Walk the 16 contiguous R32* members as an array (see member-declaration
// note) and abort on the first failed allocation.
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if ((&this->m_dz)[i] == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mym_Condensation'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_dz)[i] << "\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
}
// Reject NULL caller-supplied input arrays before copying from them.
if (dz == NULL ||
p == NULL ||
exner == NULL ||
thl == NULL ||
th == NULL ||
qw == NULL ||
tsq == NULL ||
qsq == NULL ||
cov == NULL ||
Sh == NULL ||
el == NULL ||
edmf_qc1 == NULL ) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mym_Condensation'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "One or more caller's arrays contains invalid pointer!!\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
#pragma omp parallel for if(m_kte >= (1 << 16))
for (int i = m_kts; i != m_kte; ++i) {
m_dz[i] = dz[i];
m_p[i] = p[i];
m_exner[i] = exner[i];
m_thl[i] = thl[i];
m_th[i] = th[i];
m_qw[i] = qw[i];
m_tsq[i] = tsq[i];
m_qsq[i] = qsq[i];
m_cov[i] = cov[i];
m_Sh[i] = Sh[i];
m_el[i] = el[i];
m_edmf_qc1[i] = edmf_qc1[i];
m_qc_bl1D[i] = 0.f;
m_cldfra_bl1D[i] = 0.f;
m_vt[i] = 0.f;
m_vq[i] = 0.f;
}
#else
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
#endif
// BUGFIX: the copy/zero-fill loop below was previously nested inside the
// USE_AUTO_VECTORIZATION guard, so in plain builds (neither OpenMP nor
// auto-vectorization enabled) every array member was left uninitialized.
// Only the pragmas are conditional now, matching the 1st constructor.
for (int i = m_kts; i != m_kte; ++i) {
m_dz[i] = dz[i];
m_p[i] = p[i];
m_exner[i] = exner[i];
m_thl[i] = thl[i];
m_th[i] = th[i];
m_qw[i] = qw[i];
m_tsq[i] = tsq[i];
m_qsq[i] = qsq[i];
m_cov[i] = cov[i];
m_Sh[i] = Sh[i];
m_el[i] = el[i];
m_edmf_qc1[i] = edmf_qc1[i];
m_qc_bl1D[i] = 0.f;
m_cldfra_bl1D[i] = 0.f;
m_vt[i] = 0.f;
m_vq[i] = 0.f;
}
#endif
}
/*
@Purpose:
Copy Constructor implements deep copy semantics:
allocates fresh aligned arrays sized by x.m_kte and
copies every element from x. Aborts via std::exit(-1)
on allocation failure.
*/
Wrap_Mym_Condensation(_In_ const Wrap_Mym_Condensation &x)
:
m_kts{ x.m_kts },
m_kte{ x.m_kte },
m_bl_mynn_cloudpdf{ x.m_bl_mynn_cloudpdf },
m_dx{ x.m_dx },
m_PBLH1{ x.m_PBLH1 },
m_HFX1{ x.m_HFX1 },
m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_p{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_exner{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_thl{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_th{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qw{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cov{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_Sh{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_el{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_edmf_qc1{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qc_bl1D{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cldfra_bl1D{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_vt{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_vq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) } {
// Abort on the first failed allocation (see member-declaration note on
// the (&m_dz)[i] contiguity contract).
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if ((&this->m_dz)[i] == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy-Ctor: 'Wrap_Mym_Condensation'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_dz)[i] << "\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
}
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
#pragma omp parallel for if(m_kte >= (1 << 16))
for (int i = m_kts; i != m_kte; ++i) {
m_dz[i] = x.m_dz[i];
m_p[i] = x.m_p[i];
m_exner[i] = x.m_exner[i];
m_thl[i] = x.m_thl[i];
m_th[i] = x.m_th[i];
m_qw[i] = x.m_qw[i];
m_tsq[i] = x.m_tsq[i];
m_qsq[i] = x.m_qsq[i];
m_cov[i] = x.m_cov[i];
m_Sh[i] = x.m_Sh[i];
m_el[i] = x.m_el[i];
m_edmf_qc1[i] = x.m_edmf_qc1[i];
m_qc_bl1D[i] = x.m_qc_bl1D[i];
m_cldfra_bl1D[i] = x.m_cldfra_bl1D[i];
m_vt[i] = x.m_vt[i];
m_vq[i] = x.m_vq[i];
}
#else
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
#endif
// BUGFIX: the element-copy loop below was previously nested inside the
// USE_AUTO_VECTORIZATION guard, so in plain builds the freshly allocated
// arrays were left uninitialized (a broken "copy"). Only the pragmas are
// conditional now, matching the 1st constructor's structure.
for (int i = m_kts; i != m_kte; ++i) {
m_dz[i] = x.m_dz[i];
m_p[i] = x.m_p[i];
m_exner[i] = x.m_exner[i];
m_thl[i] = x.m_thl[i];
m_th[i] = x.m_th[i];
m_qw[i] = x.m_qw[i];
m_tsq[i] = x.m_tsq[i];
m_qsq[i] = x.m_qsq[i];
m_cov[i] = x.m_cov[i];
m_Sh[i] = x.m_Sh[i];
m_el[i] = x.m_el[i];
m_edmf_qc1[i] = x.m_edmf_qc1[i];
m_qc_bl1D[i] = x.m_qc_bl1D[i];
m_cldfra_bl1D[i] = x.m_cldfra_bl1D[i];
m_vt[i] = x.m_vt[i];
m_vq[i] = x.m_vq[i];
}
#endif
}
/*
@Purpose:
Move Constructor implements shallow copy semantics:
steals the source's array pointers in a single pass and
leaves the source empty (NULL pointers, zeroed range) but
safely destructible. Marked noexcept so containers prefer
moving over copying on reallocation.
*/
Wrap_Mym_Condensation(_In_ Wrap_Mym_Condensation &&x) noexcept
:
m_kts{ x.m_kts },
m_kte{ x.m_kte },
m_bl_mynn_cloudpdf{ x.m_bl_mynn_cloudpdf },
m_dx{ x.m_dx },
m_PBLH1{ x.m_PBLH1 },
m_HFX1{ x.m_HFX1 } {
// Steal and null each of the 16 contiguous R32* members in one pass.
for (int i{ 0 }; i != this->m_totArrays; ++i) {
(&this->m_dz)[i] = (&x.m_dz)[i];
(&x.m_dz)[i] = NULL;
}
x.m_kts = 0;
x.m_kte = 0;
}
/*
@Purpose:
Class Destructor: releases every aligned array allocated with
_mm_malloc, nulls the pointers, and resets the level range.
*/
~Wrap_Mym_Condensation() {
// Free and null each of the 16 contiguous R32* members in one pass.
for (int idx{ 0 }; idx != this->m_totArrays; ++idx) {
R32 *&slot = (&this->m_dz)[idx];
if (slot) {
_mm_free(slot);
}
slot = NULL;
}
m_kts = 0;
m_kte = 0;
}
/*
@Purpose:
Copy-assign Operator implements deep copy semantics with a
strong-ish guarantee: all replacement buffers are allocated and
filled first, and the old buffers are only freed after every
allocation succeeded. Aborts via std::exit(-1) on allocation
failure (consistent with the constructors).
*/
Wrap_Mym_Condensation & operator=(_In_ const Wrap_Mym_Condensation &x) {
if (this == &x) return (*this);
m_kts = x.m_kts;
m_kte = x.m_kte;
m_bl_mynn_cloudpdf = x.m_bl_mynn_cloudpdf;
m_dx = x.m_dx;
m_PBLH1 = x.m_PBLH1;
m_HFX1 = x.m_HFX1;
constexpr int ntPtrs1D{16};
static_assert(ntPtrs1D == m_totArrays, "temporary pointer table must cover every array member");
R32 *tPtrs1D[ntPtrs1D] = {};
// Allocate replacement buffers sized by the (new) m_kte.
for (int i{ 0 }; i != this->m_totArrays; ++i) {
tPtrs1D[i] = reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)),align32B));
}
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if (tPtrs1D[i] == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy Operator: 'Wrap_Mym_Condensation'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << tPtrs1D[i] << "\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
}
// Deep-copy x's arrays into the replacement buffers.
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
#pragma omp parallel for if(m_kte >= (1 << 16))
for (int idx = 0; idx != this->m_totArrays; ++idx) {
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
for (int i = m_kts; i != m_kte; ++i) {
tPtrs1D[idx][i] = (&x.m_dz)[idx][i];
}
}
#else
// BUGFIX: this copy loop previously compiled only when
// USE_AUTO_VECTORIZATION was defined; otherwise the old arrays were
// freed below and replaced by UNINITIALIZED buffers (silent data loss).
// Only the pragmas are conditional now.
for (int idx = 0; idx != this->m_totArrays; ++idx) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
#endif
for (int i = m_kts; i != m_kte; ++i) {
tPtrs1D[idx][i] = (&x.m_dz)[idx][i];
}
}
#endif
// Swap in the replacements: free the old buffers, then install.
// (This tail was previously duplicated in both preprocessor branches.)
for (int i{ 0 }; i != this->m_totArrays; ++i) {
_mm_free((&this->m_dz)[i]);
}
for (int i{ 0 }; i != this->m_totArrays; ++i) {
(&this->m_dz)[i] = tPtrs1D[i];
}
return (*this);
}
/*
@Purpose:
Move-assign Operator implements shallow copy semantics:
frees this object's arrays, steals x's pointers, and leaves
x empty but destructible. Marked noexcept (frees and pointer
moves only) so standard containers can rely on it.
*/
Wrap_Mym_Condensation & operator=(_In_ Wrap_Mym_Condensation &&x) noexcept {
if (this == &x) return (*this);
m_kts = x.m_kts;
m_kte = x.m_kte;
m_bl_mynn_cloudpdf = x.m_bl_mynn_cloudpdf;
m_dx = x.m_dx;
m_PBLH1 = x.m_PBLH1;
m_HFX1 = x.m_HFX1;
// Free our buffers, steal x's, and null x's slots in a single pass
// over the 16 contiguous R32* members.
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if ((&this->m_dz)[i]) {
_mm_free((&this->m_dz)[i]);
}
(&this->m_dz)[i] = (&x.m_dz)[i];
(&x.m_dz)[i] = NULL;
}
x.m_kts = 0;
x.m_kte = 0;
return (*this);
}
/*
@Purpose:
Call Fortran 90 'MYM_CONDENSATION' subroutine.
Every argument is passed by address (Fortran pass-by-reference
convention); array members pass the address of element 0.
IMPORTANT: the argument order must match the Fortran dummy-argument
list of MODULE_BL_MYNN_mp_MYM_CONDENSATION exactly -- do not reorder.
*/
void Call_Mym_Condensation() {
MODULE_BL_MYNN_mp_MYM_CONDENSATION(&this->m_kts, &this->m_kte,
&this->m_dx, &this->m_dz[0],
&this->m_thl[0], &this->m_qw[0],
&this->m_p[0], &this->m_exner[0],
&this->m_tsq[0], &this->m_qsq[0], &this->m_cov[0],
&this->m_Sh[0], &this->m_el[0], &this->m_bl_mynn_cloudpdf,
&this->m_qc_bl1D[0] ,&this->m_cldfra_bl1D[0],
&this->m_PBLH1, &this->m_HFX1,
&this->m_edmf_qc1[0],
&this->m_vt[0], &this->m_vq[0], &this->m_th[0]);
}
/*
@Purpose:
Member variables:
*/
// Vertical level range [m_kts, m_kte); m_kte is also the element count
// used for every array allocation in the constructors.
I32 m_kts;
I32 m_kte;
I32 m_bl_mynn_cloudpdf; // cloud-PDF scheme selector forwarded to Fortran
R32 m_dx;
R32 m_PBLH1;
R32 m_HFX1;
// Input arrays.
// NOTE(review): constructors, destructor and assignment operators iterate
// these pointers as (&m_dz)[0 .. m_totArrays) -- the 16 R32* members below
// must therefore stay contiguous and in declaration order, and m_totArrays
// must equal their count. (Treating distinct members as an array is not
// strictly portable C++ -- confirm this is an accepted idiom here.)
_Field_size_(m_kte) R32* __restrict m_dz;
_Field_size_(m_kte) R32* __restrict m_p;
_Field_size_(m_kte) R32* __restrict m_exner;
_Field_size_(m_kte) R32* __restrict m_thl;
_Field_size_(m_kte) R32* __restrict m_th;
_Field_size_(m_kte) R32* __restrict m_qw;
_Field_size_(m_kte) R32* __restrict m_tsq;
_Field_size_(m_kte) R32* __restrict m_qsq;
_Field_size_(m_kte) R32* __restrict m_cov;
_Field_size_(m_kte) R32* __restrict m_Sh;
_Field_size_(m_kte) R32* __restrict m_el;
_Field_size_(m_kte) R32* __restrict m_edmf_qc1;
// Output arrays.
_Field_size_(m_kte) R32* __restrict m_qc_bl1D;
_Field_size_(m_kte) R32* __restrict m_cldfra_bl1D;
_Field_size_(m_kte) R32* __restrict m_vt;
_Field_size_(m_kte) R32* __restrict m_vq;
// Number of R32* array members above; must track the declarations.
static const int m_totArrays = 16;
};
}
}
#endif /*__MODULE_BL_MYNN_MYM_CONDENSATION_IMPL_H__*/ |
openmp-ex10.c | #include <stdio.h>
#include <omp.h>
/* Demonstrates manual loop work-sharing across OpenMP threads: each
 * thread computes its own contiguous slice of the N iterations, which
 * is what '#pragma omp for' with a static schedule would do for us. */
int main(void)
{
    int N = 10;
    /* Manual partitioning with just the primitives seen so far. */
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int nthr = omp_get_num_threads();
        /* This thread owns the half-open range [lo, hi). */
        int lo = (N * tid) / nthr;
        int hi = (N * (tid + 1)) / nthr;
        for (int i = lo; i < hi; i++) {
            printf("iteration %d, thread %d\n", i, tid);
        }
    }
    return 0;
}
|
GB_binop__bxnor_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__bxnor_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__bxnor_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int32)
// C=scalar+B GB (_bind1st__bxnor_int32)
// C=scalar+B' GB (_bind1st_tran__bxnor_int32)
// C=A+scalar GB (_bind2nd__bxnor_int32)
// C=A'+scalar GB (_bind2nd_tran__bxnor_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT32 || GxB_NO_BXNOR_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; the loop lives in the included
// template. Auto-generated dispatcher -- do not hand-edit (see file header).
GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; work is pre-sliced into
// B_ntasks via B_ek_slicing. Auto-generated dispatcher -- do not hand-edit.
GrB_Info GB (_Cdense_accumB__bxnor_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar) where C is dense. Auto-generated dispatcher -- do not
// hand-edit. The second 'return (GrB_SUCCESS)' is an unreachable generator
// artifact (the inner block already returns).
GrB_Info GB (_Cdense_accumb__bxnor_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B / C<M>=A+B; the numeric phase is in GB_add_template.c.
// Auto-generated dispatcher -- do not hand-edit (see file header).
GrB_Info GB (_AaddB__bxnor_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspaces declared here are released by GB_FREE_WORK below.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C=A.*B / C<M>=A.*B (method 01); numeric phase in the included
// meta file. Auto-generated dispatcher -- do not hand-edit.
GrB_Info GB (_AemultB_01__bxnor_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): A sparse/hyper, B bitmap/full. BXNOR is
// commutative (GB_BINOP_FLIP == 0), so only the unflipped template is
// compiled here. Auto-generated dispatcher -- do not hand-edit.
GrB_Info GB (_AemultB_02__bxnor_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): M sparse/hyper, A and B bitmap/full.
// Auto-generated dispatcher -- do not hand-edit (see file header).
GrB_Info GB (_AemultB_03__bxnor_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is bitmap (any mask variant).
// Auto-generated dispatcher -- do not hand-edit (see file header).
GrB_Info GB (_AemultB_bitmap__bxnor_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = bxnor(x, Bx): scalar bound as the first operand, applied across all
// bnz entries (Bb is the bitmap pattern, may be NULL for full matrices).
// Auto-generated -- do not hand-edit (see file header).
GrB_Info GB (_bind1st__bxnor_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap pattern
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = bxnor(Ax, y): scalar bound as the second operand, applied across all
// anz entries (Ab is the bitmap pattern, may be NULL for full matrices).
// Auto-generated -- do not hand-edit (see file header).
GrB_Info GB (_bind2nd__bxnor_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap pattern
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
// C = bxnor(x, A'): transpose A while applying the operator with the scalar
// bound first; GB_CAST_OP (defined just above) is used by the included
// transpose template. Auto-generated -- do not hand-edit. The trailing
// GB_ATYPE redefinition restores the macro for code after this function.
GrB_Info GB (_bind1st_tran__bxnor_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
// C = bxnor(A', y): transpose A while applying the operator with the scalar
// bound second; GB_CAST_OP (defined just above) is used by the included
// transpose template. Auto-generated -- do not hand-edit.
GrB_Info GB (_bind2nd_tran__bxnor_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
collect_independent_sets.h | /**
* @file collect_independent_sets.h
* @author Yibo Lin
* @date Mar 2019
*/
#ifndef _DREAMPLACE_INDEPENDENT_SET_MATCHING_COLLECT_INDEPENDENT_SETS_H
#define _DREAMPLACE_INDEPENDENT_SET_MATCHING_COLLECT_INDEPENDENT_SETS_H
#include <random>
#include "independent_set_matching/src/construct_selected_node2bin_map.h"
DREAMPLACE_BEGIN_NAMESPACE
// Collect independent sets of same-height movable cells by growing a
// diamond-shaped bin search outward from each still-selected seed node.
// Each set holds cells that can be matched/swapped together. Returns the
// number of sets written into state.independent_sets.
// NOTE(review): assumes cells have already been distributed to bins and
// that every candidate in a bin shares the seed's width (DEBUG-asserted).
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
int partitioning_diamond(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state)
{
// assume cells have been distributed to bins
state.independent_sets.resize(state.batch_size);
state.solutions.resize(state.batch_size);
state.target_pos_x.resize(state.batch_size);
state.target_pos_y.resize(state.batch_size);
int num_independent_sets = 0;
// Visit movable nodes in the precomputed order; each unconsumed node
// seeds a new independent set.
for (int i = 0; i < db.num_movable_nodes; ++i)
{
int seed_node = state.ordered_nodes.at(i);
if (state.selected_markers.at(seed_node))
{
typename DetailedPlaceDBType::type seed_height = db.node_size_y[seed_node];
auto const& seed_bin = state.node2bin_map.at(seed_node);
int num_bins_x = db.num_bins_x;
int num_bins_y = db.num_bins_y;
// bin_id encodes (x, y) as x*num_bins_y + y; decode the seed's bin.
int seed_bin_x = seed_bin.bin_id/num_bins_y;
int seed_bin_y = seed_bin.bin_id%num_bins_y;
auto const& bin2node_map = state.bin2node_map;
auto& independent_set = state.independent_sets.at(num_independent_sets);
++num_independent_sets;
independent_set.clear();
// Walk the precomputed diamond spiral of (row, col) offsets around
// the seed bin until the set is full or the sequence is exhausted.
for (int j = 0; j < state.max_diamond_search_sequence; ++j)
{
// get bin (bx, by)
int bx = seed_bin_x+state.search_grids.at(j).ic;
int by = seed_bin_y+state.search_grids.at(j).ir;
if (bx < 0 || bx >= num_bins_x || by < 0 || by >= num_bins_y)
{
continue;
}
int bin_id = bx*num_bins_y + by;
#ifdef DEBUG
dreamplaceAssert(bin_id < (int)bin2node_map.size());
#endif
auto const& bin2nodes = bin2node_map.at(bin_id);
// Consume every compatible (same-height, still-selected) node in
// this bin; clearing the marker prevents reuse by later seeds.
for (auto node_id : bin2nodes)
{
#ifdef DEBUG
dreamplaceAssert(db.node_size_x[node_id] == db.node_size_x[seed_node]);
#endif
if (db.node_size_y[node_id] == seed_height && state.selected_markers.at(node_id))
{
independent_set.push_back(node_id);
state.selected_markers.at(node_id) = 0;
if (independent_set.size() >= (unsigned int)state.set_size)
{
break;
}
}
}
if (independent_set.size() >= (unsigned int)state.set_size)
{
break;
}
}
// make sure batch_size is large enough
if (num_independent_sets >= state.batch_size)
{
break;
}
}
}
#ifdef DEBUG
// Diagnostic dump: per-set centroid, average L1 spread, and the sorted
// distribution of set sizes.
std::vector<typename DetailedPlaceDBType::type> centers_x (num_independent_sets, 0);
std::vector<typename DetailedPlaceDBType::type> centers_y (num_independent_sets, 0);
std::vector<typename DetailedPlaceDBType::type> partition_distances (num_independent_sets, 0);
std::vector<int> partition_sizes (num_independent_sets);
for (int j = 0; j < num_independent_sets; ++j)
{
for (auto node_id : state.independent_sets.at(j))
{
centers_x.at(j) += db.x[node_id];
centers_y.at(j) += db.y[node_id];
}
centers_x.at(j) /= state.independent_sets.at(j).size();
centers_y.at(j) /= state.independent_sets.at(j).size();
for (auto node_id : state.independent_sets.at(j))
{
partition_distances.at(j) += std::abs(db.x[node_id]-centers_x.at(j))
+ std::abs(db.y[node_id]-centers_y.at(j));
}
}
for (int i = 0; i < num_independent_sets; ++i)
{
dreamplacePrint(kDEBUG, "partition[%d][%lu]: ", i, state.independent_sets.at(i).size());
for (auto node_id : state.independent_sets.at(i))
{
dreamplacePrint(kNONE, "%d ", node_id);
}
if (state.independent_sets.at(i).size())
{
dreamplacePrint(kNONE, "; (%g, %g), avg dist %g\n",
centers_x.at(i),
centers_y.at(i),
partition_distances.at(i)/state.independent_sets.at(i).size());
}
else
{
dreamplacePrint(kNONE, ";\n");
}
}
for (int i = 0; i < num_independent_sets; ++i)
{
partition_sizes.at(i) = state.independent_sets.at(i).size();
}
std::sort(partition_sizes.begin(), partition_sizes.end());
dreamplacePrint(kDEBUG, "partition sizes: ");
for (auto s : partition_sizes)
{
dreamplacePrint(kNONE, "%d ", s);
}
dreamplacePrint(kNONE, "\n");
#endif
return num_independent_sets;
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
int partitioning_kmeans(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state)
{
    // Partition the currently selected movable cells (state.selected_markers)
    // into state.batch_size independent sets with a weighted k-means on the
    // cell locations (db.x, db.y), using Manhattan distance.
    // Partitions that exceed state.set_size get their distance weight
    // inflated so the next assignment step spreads cells out.
    // Returns the number of partitions, which is always state.batch_size.
    typedef typename DetailedPlaceDBType::type coordinate_type;

    std::vector<coordinate_type> centers_x(state.batch_size);
    std::vector<coordinate_type> centers_y(state.batch_size);
    std::vector<coordinate_type> centers_x_copy(state.batch_size);
    std::vector<coordinate_type> centers_y_copy(state.batch_size);
    std::vector<coordinate_type> weights(state.batch_size, 1.0);
    std::vector<int> partition_sizes(state.batch_size);

    // collect the ids of all selected nodes
    int num_selected = std::count(state.selected_markers.begin(), state.selected_markers.end(), 1);
    std::vector<int> selected_nodes;
    selected_nodes.reserve(num_selected);
    std::vector<int> node2centers_map(num_selected);
    for (int node_id = 0; node_id < db.num_movable_nodes; ++node_id)
    {
        if (state.selected_markers.at(node_id))
        {
            selected_nodes.push_back(node_id);
        }
    }

    // size the output containers up-front so downstream consumers always see
    // state.batch_size (possibly empty) sets, even when nothing is selected
    state.independent_sets.resize(state.batch_size);
    state.solutions.resize(state.batch_size);
    state.target_pos_x.resize(state.batch_size);
    state.target_pos_y.resize(state.batch_size);
    if (num_selected == 0)
    {
        // nothing to cluster; sampling below would be undefined on an empty
        // node list, so bail out early with all partitions empty
        return state.batch_size;
    }

    // initialize centers by sampling selected nodes; fixed seed for
    // reproducibility across runs
    std::mt19937 gen1(1234);
    // NOTE: uniform_int_distribution bounds are a closed range [a, b]; the
    // upper bound must be num_selected-1, otherwise
    // selected_nodes.at(num_selected) throws std::out_of_range
    std::uniform_int_distribution<> dis1(0, num_selected - 1);
    for (int i = 0; i < state.batch_size; ++i)
    {
        int node_id = selected_nodes.at(dis1(gen1));
        centers_x.at(i) = db.x[node_id];
        centers_y.at(i) = db.y[node_id];
    }

    // k-means iterations (fixed small count; convergence is not required,
    // only a reasonable spatial grouping)
    for (int iter = 0; iter < 2; ++iter)
    {
#ifdef DEBUG
        dreamplacePrint(kDEBUG, "# iter %d\n", iter);
#endif
        // assignment step: map each selected node to the closest center
        // under the weighted Manhattan distance
        for (int i = 0; i < num_selected; ++i)
        {
            int node_id = selected_nodes.at(i);
            auto node_x = db.x[node_id];
            auto node_y = db.y[node_id];
            int closest_center = std::numeric_limits<int>::max();
            auto closest_center_distance = std::numeric_limits<coordinate_type>::max();
            for (int j = 0; j < state.batch_size; ++j)
            {
                auto distance = (std::abs(node_x - centers_x.at(j)) + std::abs(node_y - centers_y.at(j))) * weights.at(j);
                if (distance < closest_center_distance)
                {
                    closest_center = j;
                    closest_center_distance = distance;
                }
            }
            node2centers_map.at(i) = closest_center;
        }

        // update step: recompute each center as the mean of its members;
        // empty partitions keep their previous center
        std::fill(centers_x_copy.begin(), centers_x_copy.end(), 0);
        std::fill(centers_y_copy.begin(), centers_y_copy.end(), 0);
        std::fill(partition_sizes.begin(), partition_sizes.end(), 0);
        for (int i = 0; i < num_selected; ++i)
        {
            int node_id = selected_nodes.at(i);
            int center = node2centers_map.at(i);
            centers_x_copy.at(center) += db.x[node_id];
            centers_y_copy.at(center) += db.y[node_id];
            partition_sizes.at(center) += 1;
        }
        for (int j = 0; j < state.batch_size; ++j)
        {
            if (partition_sizes.at(j))
            {
                centers_x.at(j) = centers_x_copy.at(j) / partition_sizes.at(j);
                centers_y.at(j) = centers_y_copy.at(j) / partition_sizes.at(j);
            }
        }

        // weight update: penalize partitions larger than the target set size
        for (int j = 0; j < state.batch_size; ++j)
        {
            if (partition_sizes.at(j) > state.set_size)
            {
                auto ratio = partition_sizes.at(j) / (coordinate_type)state.set_size;
                ratio = 1.0 + 0.5 * log(ratio);
#ifdef DEBUG
                dreamplacePrint(kDEBUG, "partition[%d] weight ratio %g, %d nodes\n", j, ratio, partition_sizes.at(j));
#endif
                weights.at(j) *= ratio;
            }
        }
    }

    // scatter nodes into their final partitions
    for (int i = 0; i < num_selected; ++i)
    {
        state.independent_sets.at(node2centers_map.at(i)).push_back(selected_nodes.at(i));
    }

#ifdef DEBUG
    // report per-partition membership, centers, and average distance
    std::vector<coordinate_type> partition_distances(state.batch_size, 0);
    for (int i = 0; i < num_selected; ++i)
    {
        int node_id = selected_nodes.at(i);
        int partition_id = node2centers_map.at(i);
        partition_distances.at(partition_id) += std::abs(db.x[node_id] - centers_x.at(partition_id))
                                                + std::abs(db.y[node_id] - centers_y.at(partition_id));
    }
    for (int i = 0; i < state.batch_size; ++i)
    {
        dreamplacePrint(kDEBUG, "partition[%d][%d]: ", i, partition_sizes.at(i));
        for (auto node_id : state.independent_sets.at(i))
        {
            dreamplacePrint(kNONE, "%d ", node_id);
        }
        if (partition_sizes.at(i))
        {
            dreamplacePrint(kNONE, "; (%g, %g), avg dist %g\n",
                            centers_x.at(i),
                            centers_y.at(i),
                            partition_distances.at(i) / partition_sizes.at(i));
        }
        else
        {
            dreamplacePrint(kNONE, ";\n");
        }
    }
    std::sort(partition_sizes.begin(), partition_sizes.end());
    dreamplacePrint(kDEBUG, "partition sizes: ");
    for (auto s : partition_sizes)
    {
        dreamplacePrint(kNONE, "%d ", s);
    }
    dreamplacePrint(kNONE, "\n");
#endif
    return state.batch_size;
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
int partitioning_parallel(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state)
{
    // Partition the selected movable cells into independent sets with a
    // parallel, seed-based region-growing scheme. In each sweep, a still
    // unassigned cell "wins" if it holds the smallest random id
    // (state.ordered_nodes) among the candidate cells in its diamond search
    // region; a winner is either added to the closest non-full existing
    // partition or starts a new one. Sweeps repeat until a sweep assigns
    // nothing. Returns the number of partitions created.
    //
    // NOTE(review): the sweep body mutates shared state (node2partition_map,
    // the *_new buffers, num_partitions, empty) from multiple threads with
    // only partial protection. In particular, num_partitions is read at
    // partition creation separately from its atomic increment, so two
    // threads creating partitions in the same sweep could claim the same id
    // — presumably the min-random-id filter is meant to prevent nearby
    // conflicts; confirm it guarantees this globally.

    // partition id per node; int max == not assigned yet
    std::vector<int> node2partition_map (db.num_movable_nodes, std::numeric_limits<int>::max());
    // running coordinate sums and member counts per partition;
    // a partition's center is sum / size
    std::vector<typename DetailedPlaceDBType::type> partition_centers_sum_x (state.batch_size, 0);
    std::vector<typename DetailedPlaceDBType::type> partition_centers_sum_y (state.batch_size, 0);
    std::vector<int> partition_sizes (state.batch_size, 0);
    // double-buffered copies: threads write the *_new buffers during a sweep
    // while reading a consistent snapshot of the previous sweep; the buffers
    // are synced at the end of each sweep
    std::vector<typename DetailedPlaceDBType::type> partition_centers_sum_x_new (partition_centers_sum_x);
    std::vector<typename DetailedPlaceDBType::type> partition_centers_sum_y_new (partition_centers_sum_y);
    std::vector<int> partition_sizes_new (partition_sizes);
    // 1 == still unassigned; double-buffered like the arrays above
    std::vector<char> selected_markers (state.selected_markers.begin(), state.selected_markers.end());
    std::vector<char> selected_markers_new (selected_markers);
    int num_partitions = 0;
    int iter = 0;
    bool empty = false;
    while (!empty)
    {
        //dreamplacePrint(kDEBUG, "# iter %d, %lu nodes\n", iter, std::count(selected_markers.begin(), selected_markers.end(), 1));
        //#pragma omp parallel for num_threads(state.num_threads)
        //        for (int i = 0; i < state.batch_size; ++i)
        //        {
        //            partition_centers_sum_x_new.at(i) = partition_centers_sum_x.at(i);
        //            partition_centers_sum_y_new.at(i) = partition_centers_sum_y.at(i);
        //            partition_sizes_new.at(i) = partition_sizes.at(i);
        //        }
        // assume the sweep assigns nothing until proven otherwise
        empty = true;
        #pragma omp parallel for num_threads(state.num_threads)
        for (int seed_node = 0; seed_node < db.num_movable_nodes; ++seed_node)
        {
            if (selected_markers.at(seed_node))
            {
                auto seed_height = db.node_size_y[seed_node];
                auto seed_x = db.x[seed_node];
                auto seed_y = db.y[seed_node];
                auto const& seed_bin = state.node2bin_map.at(seed_node);
                int num_bins_x = db.num_bins_x;
                int num_bins_y = db.num_bins_y;
                // decompose flat bin id into (x, y) bin coordinates
                int seed_bin_x = seed_bin.bin_id/num_bins_y;
                int seed_bin_y = seed_bin.bin_id%num_bins_y;
                auto const& bin2node_map = state.bin2node_map;
                bool min_random_id_flag = true;
                int random_id = state.ordered_nodes.at(seed_node);
                int closest_partition = std::numeric_limits<int>::max();
                typename DetailedPlaceDBType::type closest_partition_distance = std::numeric_limits<typename DetailedPlaceDBType::type>::max();
                // walk the precomputed diamond-shaped search sequence of bins
                // around the seed's bin
                for (int j = 0; j < state.max_diamond_search_sequence; ++j)
                {
                    // get bin (bx, by)
                    int bx = seed_bin_x+state.search_grids.at(j).ic;
                    int by = seed_bin_y+state.search_grids.at(j).ir;
                    if (bx < 0 || bx >= num_bins_x || by < 0 || by >= num_bins_y)
                    {
                        continue;
                    }
                    int bin_id = bx*num_bins_y + by;
                    #ifdef DEBUG
                    dreamplaceAssert(bin_id < (int)bin2node_map.size());
                    #endif
                    auto const& bin2nodes = bin2node_map.at(bin_id);
                    for (auto node_id : bin2nodes)
                    {
                        #ifdef DEBUG
                        dreamplaceAssert(db.node_size_x[node_id] == db.node_size_x[seed_node]);
                        #endif
                        // only cells of equal height are exchangeable with
                        // the seed
                        if (db.node_size_y[node_id] == seed_height)
                        {
                            if (selected_markers.at(node_id)) // no partition yet
                            {
                                // another unassigned cell with a smaller
                                // random id is in range: the seed loses
                                // this sweep
                                if (state.ordered_nodes.at(node_id) < random_id)
                                {
                                    min_random_id_flag = false;
                                    break;
                                }
                            }
                            else // already has a partition
                            {
                                int partition_id = node2partition_map.at(node_id);
                                #ifdef DEBUG
                                dreamplaceAssert(partition_id < state.batch_size);
                                #endif
                                // not full yet
                                auto s = partition_sizes.at(partition_id);
                                if (s < state.set_size)
                                {
                                    #ifdef DEBUG
                                    dreamplaceAssert(s >= 0);
                                    #endif
                                    // center of the candidate partition from
                                    // the previous sweep's snapshot
                                    // (s >= 1 here — presumably an assigned
                                    // node implies a nonzero synced size;
                                    // confirm, else this divides by zero)
                                    auto pcx = partition_centers_sum_x.at(partition_id)/s;
                                    auto pcy = partition_centers_sum_y.at(partition_id)/s;
                                    auto distance = std::abs(seed_x-pcx) + std::abs(seed_y-pcy);
                                    if (distance < closest_partition_distance)
                                    {
                                        closest_partition_distance = distance;
                                        closest_partition = partition_id;
                                    }
                                }
                            }
                        }
                    }
                    if (!min_random_id_flag)
                    {
                        break;
                    }
                }
                // current node has the smallest random id in its search region
                if (min_random_id_flag)
                {
                    if (closest_partition == std::numeric_limits<int>::max()) // no closest partition
                    {
                        // create new partition
                        // NOTE(review): num_partitions is read here outside
                        // the atomic below — see function header note
                        int partition_id = num_partitions;
                        #ifdef DEBUG
                        dreamplacePrint(kDEBUG, "create node %d to partition %d\n", seed_node, partition_id);
                        #endif
                        node2partition_map.at(seed_node) = partition_id;
                        partition_centers_sum_x_new.at(partition_id) = seed_x;
                        partition_centers_sum_y_new.at(partition_id) = seed_y;
                        partition_sizes_new.at(partition_id) = 1;
                        #pragma omp atomic
                        num_partitions += 1;
                    }
                    else // has closest partition
                    {
                        #ifdef DEBUG
                        dreamplacePrint(kDEBUG, "add node %d to partition %d\n", seed_node, closest_partition);
                        dreamplaceAssert(closest_partition < num_partitions);
                        #endif
                        // add to closest partition
                        node2partition_map.at(seed_node) = closest_partition;
                        auto& pcx = partition_centers_sum_x_new.at(closest_partition);
                        auto& pcy = partition_centers_sum_y_new.at(closest_partition);
                        int& s = partition_sizes_new.at(closest_partition);
                        // several threads may add to the same partition in
                        // one sweep; keep sum/size updates consistent
                        #pragma omp critical
                        {
                            pcx += seed_x;
                            pcy += seed_y;
                            s += 1;
                        }
                    }
                    selected_markers_new.at(seed_node) = 0;
                    // NOTE(review): `empty &= false` is just `empty = false`
                    // (marks this sweep as having assigned at least one node)
                    #pragma omp atomic
                    empty &= false;
                }
            }
        }
        // sync the double buffers for the next sweep
        #pragma omp parallel for num_threads(state.num_threads)
        for (int i = 0; i < state.batch_size; ++i)
        {
            partition_centers_sum_x.at(i) = partition_centers_sum_x_new.at(i);
            partition_centers_sum_y.at(i) = partition_centers_sum_y_new.at(i);
            partition_sizes.at(i) = partition_sizes_new.at(i);
        }
        #pragma omp parallel for num_threads(state.num_threads)
        for (int i = 0; i < db.num_movable_nodes; ++i)
        {
            selected_markers.at(i) = selected_markers_new.at(i);
        }
        ++iter;
    }
    // add to independent sets
    state.independent_sets.resize(state.batch_size);
    state.solutions.resize(state.batch_size);
    state.target_pos_x.resize(state.batch_size);
    state.target_pos_y.resize(state.batch_size);
    for (int seed_node = 0; seed_node < db.num_movable_nodes; ++seed_node)
    {
        if (state.selected_markers.at(seed_node))
        {
            int partition_id = node2partition_map.at(seed_node);
            if (partition_id < std::numeric_limits<int>::max())
            {
                state.independent_sets.at(partition_id).push_back(seed_node);
            }
        }
    }
    #ifdef DEBUG
    // report per-partition membership, centers, and average distance
    std::vector<typename DetailedPlaceDBType::type> partition_distances (state.batch_size, 0);
    for (int seed_node = 0; seed_node < db.num_movable_nodes; ++seed_node)
    {
        if (state.selected_markers.at(seed_node))
        {
            int partition_id = node2partition_map.at(seed_node);
            if (partition_id < std::numeric_limits<int>::max())
            {
                partition_distances.at(partition_id) += std::abs(db.x[seed_node]-partition_centers_sum_x.at(partition_id)/partition_sizes.at(partition_id))
                    + std::abs(db.y[seed_node]-partition_centers_sum_y.at(partition_id)/partition_sizes.at(partition_id));
            }
        }
    }
    for (int i = 0; i < state.batch_size; ++i)
    {
        dreamplacePrint(kDEBUG, "partition[%d][%d]: ", i, partition_sizes.at(i));
        for (auto node_id : state.independent_sets.at(i))
        {
            dreamplacePrint(kDEBUG, "%d ", node_id);
        }
        if (partition_sizes.at(i))
        {
            dreamplacePrint(kDEBUG, "; (%g, %g), avg dist %g\n",
                    partition_centers_sum_x.at(i)/partition_sizes.at(i),
                    partition_centers_sum_y.at(i)/partition_sizes.at(i),
                    partition_distances.at(i)/partition_sizes.at(i));
        }
        else
        {
            dreamplacePrint(kDEBUG, ";\n");
        }
    }
    #endif
    #ifdef DEBUG
    dreamplaceAssert(num_partitions == partition_sizes.size()-std::count(partition_sizes.begin(), partition_sizes.end(), 0));
    #endif
    return num_partitions;
}
template <typename DetailedPlaceDBType, typename IndependentSetMatchingStateType>
int collect_independent_sets(const DetailedPlaceDBType& db, IndependentSetMatchingStateType& state)
{
    // Collect independent sets of mutually exchangeable cells for matching:
    // 1. rebuild the spatial bin map for the selected cells,
    // 2. partition the selected cells (diamond-search strategy),
    // 3. keep at most state.batch_size sets, each with at least 3 cells,
    // 4. truncate any set larger than state.set_size.
    // Returns the number of usable sets; after the size-descending sort the
    // usable sets are exactly state.independent_sets[0 .. result-1].
    construct_selected_node2bin_map(db, state);
    for (auto& independent_set : state.independent_sets)
    {
        independent_set.clear();
    }
    int num_independent_sets = partitioning_diamond(db, state);
    // alternative partitioning strategies, kept for experimentation
    //int num_independent_sets = partitioning_kmeans(db, state);
    //int num_independent_sets = partitioning_parallel(db, state);
    // sort sets from large to small so the surviving sets form a prefix
    std::sort(state.independent_sets.begin(), state.independent_sets.end(),
              [&](const std::vector<int>& s1, const std::vector<int>& s2) {
                  return s1.size() > s2.size();
              });
    // discard sets beyond the batch capacity and sets too small to be useful
    // (a set needs >= 3 cells for matching to be worthwhile)
    num_independent_sets = 0;
    for (int i = 0; i < (int)state.independent_sets.size(); ++i)
    {
        if (i >= state.batch_size || state.independent_sets.at(i).size() < 3U)
        {
            state.independent_sets.at(i).clear();
        }
        else
        {
            // store the count (i + 1), not the index: the previous code kept
            // the last valid index, silently dropping the final surviving
            // set, and left a stale partition count when all sets were
            // cleared
            num_independent_sets = i + 1;
        }
    }
    // shrink sets that exceed the maximum matching problem size
    for (auto& independent_set : state.independent_sets)
    {
        if (independent_set.size() > (unsigned int)state.set_size)
        {
            independent_set.resize(state.set_size);
        }
    }
    // report statistics; guard the average against an empty result
    int avg_set_size = 0;
    int max_set_size = 0;
    for (int i = 0; i < num_independent_sets; ++i)
    {
        avg_set_size += state.independent_sets.at(i).size();
        max_set_size = std::max(max_set_size, (int)state.independent_sets.at(i).size());
    }
    dreamplacePrint(kDEBUG, "%d sets, average set size %d, max set size %d\n",
                    num_independent_sets,
                    (num_independent_sets) ? avg_set_size / num_independent_sets : 0,
                    max_set_size);
#ifdef DEBUG
    // verify that all cells within one set share the same width; only
    // equal-footprint cells are exchangeable in the matching problem
    for (int i = 0; i < num_independent_sets; ++i)
    {
        auto const& independent_set = state.independent_sets.at(i);
        dreamplacePrint(kNONE, "%lu ", independent_set.size());
        typename DetailedPlaceDBType::type width = 0;
        for (auto node_id : independent_set)
        {
            if (width == 0)
            {
                width = db.node_size_x[node_id];
            }
            else
            {
                dreamplaceAssertMsg(width == db.node_size_x[node_id], "width inconsistent %g vs %g\n", width, db.node_size_x[node_id]);
            }
        }
    }
    dreamplacePrint(kNONE, "\n");
#endif
    return num_independent_sets;
}
DREAMPLACE_END_NAMESPACE
#endif
|
GB_unaryop__identity_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_int32
// op(A') function: GB_tran__identity_int8_int32
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [p] = (int8_t) Ax [p] for all
// 0 <= p < anz, parallelized over the entries with OpenMP.
// (Auto-generated kernel; the GB_* macros above define the cast and op.)
GrB_Info GB_unop__identity_int8_int32
(
    int8_t *restrict Cx,            // output array, anz entries
    const int32_t *restrict Ax,     // input array, anz entries
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // expands to: load Ax [p], cast int32_t -> int8_t, store into Cx [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting each int32_t entry to
// int8_t. The sliced transpose itself lives in the shared template
// GB_unaryop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB_tran__identity_int8_int32
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t *restrict *Rowcounts,       // workspace used by the template
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,    // slice boundaries of A
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__bget_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bget_int32
// A.*B function (eWiseMult): GB_AemultB__bget_int32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bget_int32
// C+=b function (dense accum): GB_Cdense_accumb__bget_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_int32
// C=scalar+B GB_bind1st__bget_int32
// C=scalar+B' GB_bind1st_tran__bget_int32
// C=A+scalar GB_bind2nd__bget_int32
// C=A'+scalar GB_bind2nd_tran__bget_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_BITGET (aij, bij, int32_t, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITGET (x, y, int32_t, 32) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT32 || GxB_NO_BGET_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the elementwise work is in
// the shared template GB_dense_ewise3_noaccum_template.c, specialized by
// the GB_* macros above (here: cij = GB_BITGET (aij, bij, int32_t, 32)).
GrB_Info GB_Cdense_ewise3_noaccum__bget_int32
(
    GrB_Matrix C,           // output, dense
    const GrB_Matrix A,     // input, dense
    const GrB_Matrix B,     // input, dense
    const int nthreads      // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C using the BGET
// operator; the work is in the shared template GB_dense_subassign_23_template.c.
// The *_slice arrays and ntasks describe the parallel task decomposition.
GrB_Info GB_Cdense_accumB__bget_int32
(
    GrB_Matrix C,                               // dense output, accumulated in place
    const GrB_Matrix B,                         // sparse input
    const int64_t *GB_RESTRICT kfirst_slice,    // first vector of each task
    const int64_t *GB_RESTRICT klast_slice,     // last vector of each task
    const int64_t *GB_RESTRICT pstart_slice,    // entry offset of each task
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into every entry of a dense matrix C using
// the BGET operator; the work is in GB_dense_subassign_22_template.c.
GrB_Info GB_Cdense_accumb__bget_int32
(
    GrB_Matrix C,               // dense output, accumulated in place
    const GB_void *p_bwork,     // points to the int32_t scalar b
    const int nthreads          // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first
    // (harmless artifact of the code generator)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the BGET operator; the pattern/task
// handling lives in the shared template GB_add_template.c. The C_to_*
// arrays map vectors of C back to vectors of M, A, and B.
GrB_Info GB_AaddB__bget_int32
(
    GrB_Matrix C,                           // output
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,                 // if true, use only M's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,                    // true if C's hyperlist equals M's
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,     // parallel task schedule
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the BGET operator; the
// pattern-intersection and task handling live in GB_emult_template.c.
GrB_Info GB_AemultB__bget_int32
(
    GrB_Matrix C,                           // output
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,                 // if true, use only M's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,     // parallel task schedule
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]): apply the BGET binary operator entrywise with
// the scalar bound to the first argument.
GrB_Info GB_bind1st__bget_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // points to the int32_t scalar x
    const GB_void *Bx_input,    // input array, anz entries
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t bij = Bx [p] ;
        // cij = GB_BITGET (x, bij, ...) — the BGET op (macro from GB.h)
        Cx [p] = GB_BITGET (x, bij, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y): apply the BGET binary operator entrywise with
// the scalar bound to the second argument.
GrB_Info GB_bind2nd__bget_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // input array, anz entries
    const GB_void *y_input,     // points to the int32_t scalar y
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        // cij = GB_BITGET (aij, y, ...) — the BGET op (macro from GB.h)
        Cx [p] = GB_BITGET (aij, y, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (x, aij, int32_t, 32) ; \
}
// C = op (x, A'): transpose A and apply the BGET operator with the scalar
// bound to the first argument; the transpose itself is the shared template
// GB_unop_transpose.c, driven by the GB_CAST_OP macro redefined just above.
GrB_Info GB_bind1st_tran__bget_int32
(
    GrB_Matrix C,                           // output matrix
    const GB_void *x_input,                 // points to the int32_t scalar x
    const GrB_Matrix A,                     // input matrix, transposed into C
    int64_t *GB_RESTRICT *Rowcounts,        // workspace used by the template
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,     // slice boundaries of A
    int naslice                             // number of slices of A
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (aij, y, int32_t, 32) ; \
}
// C = op (A', y): transpose A and apply the BGET operator with the scalar
// bound to the second argument; the transpose itself is the shared template
// GB_unop_transpose.c, driven by the GB_CAST_OP macro redefined just above.
GrB_Info GB_bind2nd_tran__bget_int32
(
    GrB_Matrix C,                           // output matrix
    const GrB_Matrix A,                     // input matrix, transposed into C
    const GB_void *y_input,                 // points to the int32_t scalar y
    int64_t *GB_RESTRICT *Rowcounts,        // workspace used by the template
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,     // slice boundaries of A
    int naslice                             // number of slices of A
)
{
    #if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
hello.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * Classic OpenMP "hello world": every thread in the parallel region prints
 * its own id. The two separate printf calls are deliberate — output from
 * different threads may interleave, which is the point of the demo.
 */
int main() {
    int tid;
    /* tid is private so each thread reports its own id */
    #pragma omp parallel private(tid)
    {
        tid = omp_get_thread_num();
        printf("Hello(%d)", tid);
        printf("World(%d)\n", tid);
    }
    return 0;
}
|
convolution_3x3_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 convolution weights into the Winograd F(6x6, 3x3) domain
// (each 3x3 kernel becomes an 8x8 tile) and interleave the result into the
// packed fp16 layout consumed by conv3x3s1_winograd64_pack8_fp16sa_neon.
//   kernel          : inch*outch 3x3 float kernels, laid out contiguously
//   kernel_tm_pack8 : output, created here as [outch/8][64][inch/8] with 8x8
//                     __fp16 values per element (see layout comment below)
// NOTE(review): the interleave loops assume inch and outch are multiples of
// 8 (leftover channels are silently dropped) — confirm callers pad to 8.
static void conv3x3s1_winograd64_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // transform matrix rows: each maps a 3-tap kernel row/column to 8 taps
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            // 3x3 kernel for (output channel p, input channel q)
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            // destination: 64 transformed taps for this (p, q) pair
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;      // kernel row 0
            const float* k1 = kernel0 + 3;  // kernel row 1
            const float* k2 = kernel0 + 6;  // kernel row 2

            // h: expand each kernel row through ktm -> 8x3 intermediate
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v: expand the other dimension -> 8x8 transformed tile
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 8b-8a-inch/8a-64-outch/8b
    // (2 bytes per element: the packed weights are stored as __fp16)
    kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)2u * 64, 64);

    int q = 0;
    for (; q + 7 < outch; q += 8)
    {
        // gather 8 consecutive output channels into one packed channel
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);
        const Mat k4 = kernel_tm.channel(q + 4);
        const Mat k5 = kernel_tm.channel(q + 5);
        const Mat k6 = kernel_tm.channel(q + 6);
        const Mat k7 = kernel_tm.channel(q + 7);

        Mat g0 = kernel_tm_pack8.channel(q / 8);

        // for each of the 64 Winograd taps, pack inch/8 groups of 8 input
        // channels x 8 output channels, converting float -> __fp16
        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + 7 < inch; p += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    const float* k00 = k0.row(p + i);
                    const float* k10 = k1.row(p + i);
                    const float* k20 = k2.row(p + i);
                    const float* k30 = k3.row(p + i);
                    const float* k40 = k4.row(p + i);
                    const float* k50 = k5.row(p + i);
                    const float* k60 = k6.row(p + i);
                    const float* k70 = k7.row(p + i);

                    // innermost dimension: the 8 output channels
                    g00[0] = (__fp16)k00[k];
                    g00[1] = (__fp16)k10[k];
                    g00[2] = (__fp16)k20[k];
                    g00[3] = (__fp16)k30[k];
                    g00[4] = (__fp16)k40[k];
                    g00[5] = (__fp16)k50[k];
                    g00[6] = (__fp16)k60[k];
                    g00[7] = (__fp16)k70[k];

                    g00 += 8;
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) convolution for pack8 fp16 storage + fp16 arithmetic (fp16sa).
// bottom_blob : packed-8 fp16 input feature map.
// top_blob    : pre-sized packed-8 fp16 output feature map (written in place).
// kernel_tm   : kernel already transformed to the 8x8 Winograd domain and
//               interleaved for this kernel (see the matching transform_kernel function).
// _bias       : per-output-channel bias, 8 fp16 values per pack8 channel group
//               (may be empty, in which case bias is treated as zero).
// Pipeline: pad input -> input transform (spatial -> 64 Winograd "frequency" planes)
// -> permute tiles for GEMM -> per-plane matrix multiply (inline asm micro-kernels)
// -> output transform back to spatial domain -> cut padding.
static void conv3x3s1_winograd64_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
// Each output tile is 6x6; the corresponding input tile is 8x8 with a 2-pixel
// overlap, so the padded input is (6n + 2) in each spatial dimension.
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const __fp16* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
// 2u * elempack = bytes per packed fp16 element group (2 bytes per __fp16).
bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
//     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
//
//     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
//     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
//
//     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
//     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
//
//     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
//     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
//
//     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// Staging buffer: transform is applied first along rows (into tmp),
// then along columns (from tmp into img0_tm).
__fp16 tmp[8][8][8];
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8;
// Pass 1: transform each of the 8 rows of the 8x8 input tile.
for (int m = 0; m < 8; m++)
{
float16x8_t _r00 = vld1q_f16(r0);
float16x8_t _r01 = vld1q_f16(r0 + 8);
float16x8_t _r02 = vld1q_f16(r0 + 16);
float16x8_t _r03 = vld1q_f16(r0 + 24);
float16x8_t _r04 = vld1q_f16(r0 + 32);
float16x8_t _r05 = vld1q_f16(r0 + 40);
float16x8_t _r06 = vld1q_f16(r0 + 48);
float16x8_t _r07 = vld1q_f16(r0 + 56);
float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f);
float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f);
vst1q_f16(tmp[0][m], _tmp0m);
vst1q_f16(tmp[7][m], _tmp7m);
// tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
// tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f);
float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f);
// float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
// float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b);
float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b);
vst1q_f16(tmp[1][m], _tmp1m);
vst1q_f16(tmp[2][m], _tmp2m);
// tmp[1][m] = tmp12a + tmp12b;
// tmp[2][m] = tmp12a - tmp12b;
float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
// float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
// float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b);
float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b);
vst1q_f16(tmp[3][m], _tmp3m);
vst1q_f16(tmp[4][m], _tmp4m);
// tmp[3][m] = tmp34a + tmp34b;
// tmp[4][m] = tmp34a - tmp34b;
float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f);
float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
// float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
// float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b);
float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b);
vst1q_f16(tmp[5][m], _tmp5m);
vst1q_f16(tmp[6][m], _tmp6m);
// tmp[5][m] = tmp56a + tmp56b;
// tmp[6][m] = tmp56a - tmp56b;
r0 += w * 8;
}
// Output pointers: the 64 Winograd planes are laid out as 8 groups of 8,
// each plane holding `tiles` pack8 elements.
__fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8;
__fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
__fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
__fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
__fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
__fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
__fp16* r0_tm_6 = r0_tm_0 + tiles * 48;
__fp16* r0_tm_7 = r0_tm_0 + tiles * 56;
// Pass 2: transform each column of the row-transformed tile.
for (int m = 0; m < 8; m++)
{
float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);
float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f);
float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f);
// r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
// r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f);
float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f);
// float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
// float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);
float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b);
float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b);
// r0_tm[1] = tmp12a + tmp12b;
// r0_tm[2] = tmp12a - tmp12b;
float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
// float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
// float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b);
float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b);
// r0_tm[3] = tmp34a + tmp34b;
// r0_tm[4] = tmp34a - tmp34b;
float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
// float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
// float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b);
float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b);
// r0_tm[5] = tmp56a + tmp56b;
// r0_tm[6] = tmp56a - tmp56b;
vst1q_f16(r0_tm_0, _r0tm0);
vst1q_f16(r0_tm_1, _r0tm1);
vst1q_f16(r0_tm_2, _r0tm2);
vst1q_f16(r0_tm_3, _r0tm3);
vst1q_f16(r0_tm_4, _r0tm4);
vst1q_f16(r0_tm_5, _r0tm5);
vst1q_f16(r0_tm_6, _r0tm6);
vst1q_f16(r0_tm_7, _r0tm7);
// Advance each pointer by 8 planes (tiles * 8 elements per plane).
r0_tm_0 += tiles * 64;
r0_tm_1 += tiles * 64;
r0_tm_2 += tiles * 64;
r0_tm_3 += tiles * 64;
r0_tm_4 += tiles * 64;
r0_tm_5 += tiles * 64;
r0_tm_6 += tiles * 64;
r0_tm_7 += tiles * 64;
}
}
}
}
}
// Release the padded copy as early as possible to cap peak workspace memory.
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// Repack tiles into batches of 12/8/4/2/1 so the GEMM micro-kernels below
// read their inputs contiguously.
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
__fp16* tm2p = tm2.row<__fp16>(i / 12);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 12x8
asm volatile(
"prfm   pldl1keep, [%0, #512]   \n"
"ld4    {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4    {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n"
"ld4    {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n"
"sub    %0, %0, #128            \n"
"uzp1   v20.8h, v0.8h, v4.8h    \n" // 0
"uzp1   v21.8h, v16.8h, v1.8h   \n" // 1
"uzp1   v22.8h, v5.8h, v17.8h   \n" // 2
"uzp1   v23.8h, v2.8h, v6.8h    \n" // 3
"uzp1   v24.8h, v18.8h, v3.8h   \n" // 4
"uzp1   v25.8h, v7.8h, v19.8h   \n" // 5
"uzp2   v26.8h, v0.8h, v4.8h    \n" // 6
"uzp2   v27.8h, v16.8h, v1.8h   \n" // 7
"uzp2   v28.8h, v5.8h, v17.8h   \n" // 8
"uzp2   v29.8h, v2.8h, v6.8h    \n" // 9
"uzp2   v30.8h, v18.8h, v3.8h   \n" // 10
"uzp2   v31.8h, v7.8h, v19.8h   \n" // 11
"st1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1    {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1    {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(r0),  // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 7 < tiles; i += 8)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm   pldl1keep, [%0, #512]   \n"
"ld4    {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4    {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub    %0, %0, #64             \n"
"uzp1   v16.8h, v0.8h, v4.8h    \n"
"uzp2   v20.8h, v0.8h, v4.8h    \n"
"uzp1   v17.8h, v1.8h, v5.8h    \n"
"uzp2   v21.8h, v1.8h, v5.8h    \n"
"uzp1   v18.8h, v2.8h, v6.8h    \n"
"uzp2   v22.8h, v2.8h, v6.8h    \n"
"uzp1   v19.8h, v3.8h, v7.8h    \n"
"uzp2   v23.8h, v3.8h, v7.8h    \n"
"st1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(r0),    // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 3 < tiles; i += 4)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// 4 tiles: straight copy, no transpose needed
asm volatile(
"prfm   pldl1keep, [%0, #512]   \n"
"ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0),    // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 1 < tiles; i += 2)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm   pldl1keep, [%0, #256]   \n"
"ld1    {v0.8h, v1.8h}, [%0]    \n"
"st1    {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(r0),    // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm   pldl1keep, [%0, #128]   \n"
"ld1    {v0.8h}, [%0]           \n"
"st1    {v0.8h}, [%1], #16      \n"
: "=r"(r0),    // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0");
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator);
// Per Winograd plane r: output0_tm[tile] = sum over inch of input * kernel,
// computed by aarch64 asm micro-kernels handling 12/8/4/2/1 tiles at a time.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
// 12-tile kernel: v20-v31 accumulate 12 pack8 outputs across all input channels.
asm volatile(
"eor    v20.16b, v20.16b, v20.16b  \n"
"eor    v21.16b, v21.16b, v21.16b  \n"
"eor    v22.16b, v22.16b, v22.16b  \n"
"eor    v23.16b, v23.16b, v23.16b  \n"
"eor    v24.16b, v24.16b, v24.16b  \n"
"eor    v25.16b, v25.16b, v25.16b  \n"
"eor    v26.16b, v26.16b, v26.16b  \n"
"eor    v27.16b, v27.16b, v27.16b  \n"
"eor    v28.16b, v28.16b, v28.16b  \n"
"eor    v29.16b, v29.16b, v29.16b  \n"
"eor    v30.16b, v30.16b, v30.16b  \n"
"eor    v31.16b, v31.16b, v31.16b  \n"
"0:                             \n"
"prfm   pldl1keep, [%2, #512]   \n"
"ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123
"fmla   v20.8h, v12.8h, v0.h[0] \n"
"fmla   v21.8h, v12.8h, v0.h[1] \n"
"fmla   v22.8h, v12.8h, v0.h[2] \n"
"fmla   v23.8h, v12.8h, v0.h[3] \n"
"fmla   v24.8h, v12.8h, v0.h[4] \n"
"fmla   v25.8h, v12.8h, v0.h[5] \n"
"fmla   v26.8h, v12.8h, v0.h[6] \n"
"fmla   v27.8h, v12.8h, v0.h[7] \n"
"fmla   v28.8h, v12.8h, v1.h[0] \n"
"fmla   v29.8h, v12.8h, v1.h[1] \n"
"fmla   v30.8h, v12.8h, v1.h[2] \n"
"fmla   v31.8h, v12.8h, v1.h[3] \n"
"fmla   v20.8h, v13.8h, v1.h[4] \n"
"fmla   v21.8h, v13.8h, v1.h[5] \n"
"fmla   v22.8h, v13.8h, v1.h[6] \n"
"fmla   v23.8h, v13.8h, v1.h[7] \n"
"fmla   v24.8h, v13.8h, v2.h[0] \n"
"fmla   v25.8h, v13.8h, v2.h[1] \n"
"fmla   v26.8h, v13.8h, v2.h[2] \n"
"fmla   v27.8h, v13.8h, v2.h[3] \n"
"fmla   v28.8h, v13.8h, v2.h[4] \n"
"fmla   v29.8h, v13.8h, v2.h[5] \n"
"fmla   v30.8h, v13.8h, v2.h[6] \n"
"fmla   v31.8h, v13.8h, v2.h[7] \n"
"prfm   pldl1keep, [%2, #512]   \n"
"ld1    {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla   v20.8h, v14.8h, v3.h[0] \n"
"fmla   v21.8h, v14.8h, v3.h[1] \n"
"fmla   v22.8h, v14.8h, v3.h[2] \n"
"fmla   v23.8h, v14.8h, v3.h[3] \n"
"fmla   v24.8h, v14.8h, v3.h[4] \n"
"fmla   v25.8h, v14.8h, v3.h[5] \n"
"fmla   v26.8h, v14.8h, v3.h[6] \n"
"fmla   v27.8h, v14.8h, v3.h[7] \n"
"fmla   v28.8h, v14.8h, v4.h[0] \n"
"fmla   v29.8h, v14.8h, v4.h[1] \n"
"fmla   v30.8h, v14.8h, v4.h[2] \n"
"fmla   v31.8h, v14.8h, v4.h[3] \n"
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567
"fmla   v20.8h, v15.8h, v4.h[4] \n"
"fmla   v21.8h, v15.8h, v4.h[5] \n"
"fmla   v22.8h, v15.8h, v4.h[6] \n"
"fmla   v23.8h, v15.8h, v4.h[7] \n"
"fmla   v24.8h, v15.8h, v5.h[0] \n"
"fmla   v25.8h, v15.8h, v5.h[1] \n"
"fmla   v26.8h, v15.8h, v5.h[2] \n"
"fmla   v27.8h, v15.8h, v5.h[3] \n"
"fmla   v28.8h, v15.8h, v5.h[4] \n"
"fmla   v29.8h, v15.8h, v5.h[5] \n"
"fmla   v30.8h, v15.8h, v5.h[6] \n"
"fmla   v31.8h, v15.8h, v5.h[7] \n"
"fmla   v20.8h, v16.8h, v6.h[0] \n"
"fmla   v21.8h, v16.8h, v6.h[1] \n"
"fmla   v22.8h, v16.8h, v6.h[2] \n"
"fmla   v23.8h, v16.8h, v6.h[3] \n"
"fmla   v24.8h, v16.8h, v6.h[4] \n"
"fmla   v25.8h, v16.8h, v6.h[5] \n"
"fmla   v26.8h, v16.8h, v6.h[6] \n"
"fmla   v27.8h, v16.8h, v6.h[7] \n"
"fmla   v28.8h, v16.8h, v7.h[0] \n"
"fmla   v29.8h, v16.8h, v7.h[1] \n"
"fmla   v30.8h, v16.8h, v7.h[2] \n"
"fmla   v31.8h, v16.8h, v7.h[3] \n"
"prfm   pldl1keep, [%2, #512]   \n"
"ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011
"fmla   v20.8h, v17.8h, v7.h[4] \n"
"fmla   v21.8h, v17.8h, v7.h[5] \n"
"fmla   v22.8h, v17.8h, v7.h[6] \n"
"fmla   v23.8h, v17.8h, v7.h[7] \n"
"fmla   v24.8h, v17.8h, v8.h[0] \n"
"fmla   v25.8h, v17.8h, v8.h[1] \n"
"fmla   v26.8h, v17.8h, v8.h[2] \n"
"fmla   v27.8h, v17.8h, v8.h[3] \n"
"fmla   v28.8h, v17.8h, v8.h[4] \n"
"fmla   v29.8h, v17.8h, v8.h[5] \n"
"fmla   v30.8h, v17.8h, v8.h[6] \n"
"fmla   v31.8h, v17.8h, v8.h[7] \n"
"fmla   v20.8h, v18.8h, v9.h[0] \n"
"fmla   v21.8h, v18.8h, v9.h[1] \n"
"fmla   v22.8h, v18.8h, v9.h[2] \n"
"fmla   v23.8h, v18.8h, v9.h[3] \n"
"fmla   v24.8h, v18.8h, v9.h[4] \n"
"fmla   v25.8h, v18.8h, v9.h[5] \n"
"fmla   v26.8h, v18.8h, v9.h[6] \n"
"fmla   v27.8h, v18.8h, v9.h[7] \n"
"fmla   v28.8h, v18.8h, v10.h[0] \n"
"fmla   v29.8h, v18.8h, v10.h[1] \n"
"fmla   v30.8h, v18.8h, v10.h[2] \n"
"fmla   v31.8h, v18.8h, v10.h[3] \n"
"subs   %w0, %w0, #1            \n"
"fmla   v20.8h, v19.8h, v10.h[4] \n"
"fmla   v21.8h, v19.8h, v10.h[5] \n"
"fmla   v22.8h, v19.8h, v10.h[6] \n"
"fmla   v23.8h, v19.8h, v10.h[7] \n"
"fmla   v24.8h, v19.8h, v11.h[0] \n"
"fmla   v25.8h, v19.8h, v11.h[1] \n"
"fmla   v26.8h, v19.8h, v11.h[2] \n"
"fmla   v27.8h, v19.8h, v11.h[3] \n"
"fmla   v28.8h, v19.8h, v11.h[4] \n"
"fmla   v29.8h, v19.8h, v11.h[5] \n"
"fmla   v30.8h, v19.8h, v11.h[6] \n"
"fmla   v31.8h, v19.8h, v11.h[7] \n"
"bne    0b                      \n"
"st1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1    {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1    {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(nn),         // %0
"=r"(output0_tm), // %1
"=r"(r0),         // %2
"=r"(k0)          // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
// 8-tile kernel: v16-v23 accumulate 8 pack8 outputs.
asm volatile(
"eor    v16.16b, v16.16b, v16.16b  \n"
"eor    v17.16b, v17.16b, v17.16b  \n"
"eor    v18.16b, v18.16b, v18.16b  \n"
"eor    v19.16b, v19.16b, v19.16b  \n"
"eor    v20.16b, v20.16b, v20.16b  \n"
"eor    v21.16b, v21.16b, v21.16b  \n"
"eor    v22.16b, v22.16b, v22.16b  \n"
"eor    v23.16b, v23.16b, v23.16b  \n"
"0:                             \n"
"prfm   pldl1keep, [%2, #512]   \n"
"ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla   v16.8h, v8.8h, v0.h[0]  \n"
"fmla   v17.8h, v8.8h, v0.h[1]  \n"
"fmla   v18.8h, v8.8h, v0.h[2]  \n"
"fmla   v19.8h, v8.8h, v0.h[3]  \n"
"fmla   v20.8h, v8.8h, v0.h[4]  \n"
"fmla   v21.8h, v8.8h, v0.h[5]  \n"
"fmla   v22.8h, v8.8h, v0.h[6]  \n"
"fmla   v23.8h, v8.8h, v0.h[7]  \n"
"fmla   v16.8h, v9.8h, v1.h[0]  \n"
"fmla   v17.8h, v9.8h, v1.h[1]  \n"
"fmla   v18.8h, v9.8h, v1.h[2]  \n"
"fmla   v19.8h, v9.8h, v1.h[3]  \n"
"fmla   v20.8h, v9.8h, v1.h[4]  \n"
"fmla   v21.8h, v9.8h, v1.h[5]  \n"
"fmla   v22.8h, v9.8h, v1.h[6]  \n"
"fmla   v23.8h, v9.8h, v1.h[7]  \n"
"prfm   pldl1keep, [%2, #512]   \n"
"ld1    {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla   v16.8h, v10.8h, v2.h[0] \n"
"fmla   v17.8h, v10.8h, v2.h[1] \n"
"fmla   v18.8h, v10.8h, v2.h[2] \n"
"fmla   v19.8h, v10.8h, v2.h[3] \n"
"fmla   v20.8h, v10.8h, v2.h[4] \n"
"fmla   v21.8h, v10.8h, v2.h[5] \n"
"fmla   v22.8h, v10.8h, v2.h[6] \n"
"fmla   v23.8h, v10.8h, v2.h[7] \n"
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla   v16.8h, v11.8h, v3.h[0] \n"
"fmla   v17.8h, v11.8h, v3.h[1] \n"
"fmla   v18.8h, v11.8h, v3.h[2] \n"
"fmla   v19.8h, v11.8h, v3.h[3] \n"
"fmla   v20.8h, v11.8h, v3.h[4] \n"
"fmla   v21.8h, v11.8h, v3.h[5] \n"
"fmla   v22.8h, v11.8h, v3.h[6] \n"
"fmla   v23.8h, v11.8h, v3.h[7] \n"
"fmla   v16.8h, v12.8h, v4.h[0] \n"
"fmla   v17.8h, v12.8h, v4.h[1] \n"
"fmla   v18.8h, v12.8h, v4.h[2] \n"
"fmla   v19.8h, v12.8h, v4.h[3] \n"
"fmla   v20.8h, v12.8h, v4.h[4] \n"
"fmla   v21.8h, v12.8h, v4.h[5] \n"
"fmla   v22.8h, v12.8h, v4.h[6] \n"
"fmla   v23.8h, v12.8h, v4.h[7] \n"
"fmla   v16.8h, v13.8h, v5.h[0] \n"
"fmla   v17.8h, v13.8h, v5.h[1] \n"
"fmla   v18.8h, v13.8h, v5.h[2] \n"
"fmla   v19.8h, v13.8h, v5.h[3] \n"
"fmla   v20.8h, v13.8h, v5.h[4] \n"
"fmla   v21.8h, v13.8h, v5.h[5] \n"
"fmla   v22.8h, v13.8h, v5.h[6] \n"
"fmla   v23.8h, v13.8h, v5.h[7] \n"
"fmla   v16.8h, v14.8h, v6.h[0] \n"
"fmla   v17.8h, v14.8h, v6.h[1] \n"
"fmla   v18.8h, v14.8h, v6.h[2] \n"
"fmla   v19.8h, v14.8h, v6.h[3] \n"
"fmla   v20.8h, v14.8h, v6.h[4] \n"
"fmla   v21.8h, v14.8h, v6.h[5] \n"
"fmla   v22.8h, v14.8h, v6.h[6] \n"
"fmla   v23.8h, v14.8h, v6.h[7] \n"
"subs   %w0, %w0, #1            \n"
"fmla   v16.8h, v15.8h, v7.h[0] \n"
"fmla   v17.8h, v15.8h, v7.h[1] \n"
"fmla   v18.8h, v15.8h, v7.h[2] \n"
"fmla   v19.8h, v15.8h, v7.h[3] \n"
"fmla   v20.8h, v15.8h, v7.h[4] \n"
"fmla   v21.8h, v15.8h, v7.h[5] \n"
"fmla   v22.8h, v15.8h, v7.h[6] \n"
"fmla   v23.8h, v15.8h, v7.h[7] \n"
"bne    0b                      \n"
"st1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(nn),         // %0
"=r"(output0_tm), // %1
"=r"(r0),         // %2
"=r"(k0)          // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
// 4-tile kernel: v16-v19 accumulate 4 pack8 outputs.
asm volatile(
"eor    v16.16b, v16.16b, v16.16b  \n"
"eor    v17.16b, v17.16b, v17.16b  \n"
"eor    v18.16b, v18.16b, v18.16b  \n"
"eor    v19.16b, v19.16b, v19.16b  \n"
"0:                             \n"
"prfm   pldl1keep, [%2, #512]   \n"
"ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla   v16.8h, v8.8h, v0.h[0]  \n"
"fmla   v17.8h, v8.8h, v1.h[0]  \n"
"fmla   v18.8h, v8.8h, v2.h[0]  \n"
"fmla   v19.8h, v8.8h, v3.h[0]  \n"
"fmla   v16.8h, v9.8h, v0.h[1]  \n"
"fmla   v17.8h, v9.8h, v1.h[1]  \n"
"fmla   v18.8h, v9.8h, v2.h[1]  \n"
"fmla   v19.8h, v9.8h, v3.h[1]  \n"
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla   v16.8h, v10.8h, v0.h[2] \n"
"fmla   v17.8h, v10.8h, v1.h[2] \n"
"fmla   v18.8h, v10.8h, v2.h[2] \n"
"fmla   v19.8h, v10.8h, v3.h[2] \n"
"fmla   v16.8h, v11.8h, v0.h[3] \n"
"fmla   v17.8h, v11.8h, v1.h[3] \n"
"fmla   v18.8h, v11.8h, v2.h[3] \n"
"fmla   v19.8h, v11.8h, v3.h[3] \n"
"fmla   v16.8h, v12.8h, v0.h[4] \n"
"fmla   v17.8h, v12.8h, v1.h[4] \n"
"fmla   v18.8h, v12.8h, v2.h[4] \n"
"fmla   v19.8h, v12.8h, v3.h[4] \n"
"fmla   v16.8h, v13.8h, v0.h[5] \n"
"fmla   v17.8h, v13.8h, v1.h[5] \n"
"fmla   v18.8h, v13.8h, v2.h[5] \n"
"fmla   v19.8h, v13.8h, v3.h[5] \n"
"fmla   v16.8h, v14.8h, v0.h[6] \n"
"fmla   v17.8h, v14.8h, v1.h[6] \n"
"fmla   v18.8h, v14.8h, v2.h[6] \n"
"fmla   v19.8h, v14.8h, v3.h[6] \n"
"subs   %w0, %w0, #1            \n"
"fmla   v16.8h, v15.8h, v0.h[7] \n"
"fmla   v17.8h, v15.8h, v1.h[7] \n"
"fmla   v18.8h, v15.8h, v2.h[7] \n"
"fmla   v19.8h, v15.8h, v3.h[7] \n"
"bne    0b                      \n"
"st1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
: "=r"(nn),         // %0
"=r"(output0_tm), // %1
"=r"(r0),         // %2
"=r"(k0)          // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i + 1 < tiles; i += 2)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
// 2-tile kernel: v16-v17 accumulate 2 pack8 outputs.
asm volatile(
"eor    v16.16b, v16.16b, v16.16b  \n"
"eor    v17.16b, v17.16b, v17.16b  \n"
"0:                             \n"
"prfm   pldl1keep, [%2, #256]   \n"
"ld1    {v0.8h, v1.8h}, [%2], #32 \n" // r01
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla   v16.8h, v8.8h, v0.h[0]  \n"
"fmla   v17.8h, v8.8h, v1.h[0]  \n"
"fmla   v16.8h, v9.8h, v0.h[1]  \n"
"fmla   v17.8h, v9.8h, v1.h[1]  \n"
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla   v16.8h, v10.8h, v0.h[2] \n"
"fmla   v17.8h, v10.8h, v1.h[2] \n"
"fmla   v16.8h, v11.8h, v0.h[3] \n"
"fmla   v17.8h, v11.8h, v1.h[3] \n"
"fmla   v16.8h, v12.8h, v0.h[4] \n"
"fmla   v17.8h, v12.8h, v1.h[4] \n"
"fmla   v16.8h, v13.8h, v0.h[5] \n"
"fmla   v17.8h, v13.8h, v1.h[5] \n"
"fmla   v16.8h, v14.8h, v0.h[6] \n"
"fmla   v17.8h, v14.8h, v1.h[6] \n"
"subs   %w0, %w0, #1            \n"
"fmla   v16.8h, v15.8h, v0.h[7] \n"
"fmla   v17.8h, v15.8h, v1.h[7] \n"
"bne    0b                      \n"
"st1    {v16.8h, v17.8h}, [%1], #32 \n"
: "=r"(nn),         // %0
"=r"(output0_tm), // %1
"=r"(r0),         // %2
"=r"(k0)          // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
// 1-tile kernel: v16 accumulates a single pack8 output.
asm volatile(
"eor    v16.16b, v16.16b, v16.16b  \n"
"0:                             \n"
"prfm   pldl1keep, [%2, #128]   \n"
"ld1    {v0.8h}, [%2], #16      \n" // r0
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla   v16.8h, v8.8h, v0.h[0]  \n"
"fmla   v16.8h, v9.8h, v0.h[1]  \n"
"prfm   pldl1keep, [%3, #512]   \n"
"ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla   v16.8h, v10.8h, v0.h[2] \n"
"fmla   v16.8h, v11.8h, v0.h[3] \n"
"fmla   v16.8h, v12.8h, v0.h[4] \n"
"fmla   v16.8h, v13.8h, v0.h[5] \n"
"subs   %w0, %w0, #1            \n"
"fmla   v16.8h, v14.8h, v0.h[6] \n"
"fmla   v16.8h, v15.8h, v0.h[7] \n"
"bne    0b                      \n"
"st1    {v16.8h}, [%1], #16     \n"
: "=r"(nn),         // %0
"=r"(output0_tm), // %1
"=r"(r0),         // %2
"=r"(k0)          // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
}
}
}
}
// NOTE(review): bottom_blob_tm was already released inside the block above;
// this second release is a harmless no-op kept byte-identical.
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
// No padding was added; write the output transform directly into top_blob.
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
//     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
//     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
//     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
//     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
//     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f, 2.0f,  2.0f, 0.0f},
//     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
// 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
float16x8_t _bias0 = bias ? vld1q_f16((const __fp16*)bias + p * 8) : vdupq_n_f16(0.f);
// Staging buffer for the two-pass (rows then columns) output transform:
// an 8x8 Winograd tile becomes a 6x8 intermediate, then a 6x6 output tile.
__fp16 tmp[6][8][8];
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 8;
const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8;
const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16;
const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24;
const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32;
const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40;
const __fp16* output0_tm_6 = output0_tm_0 + tiles * 48;
const __fp16* output0_tm_7 = output0_tm_0 + tiles * 56;
__fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 8;
// TODO neon optimize
for (int m = 0; m < 8; m++)
{
float16x8_t _out0tm0 = vld1q_f16(output0_tm_0);
float16x8_t _out0tm1 = vld1q_f16(output0_tm_1);
float16x8_t _out0tm2 = vld1q_f16(output0_tm_2);
float16x8_t _out0tm3 = vld1q_f16(output0_tm_3);
float16x8_t _out0tm4 = vld1q_f16(output0_tm_4);
float16x8_t _out0tm5 = vld1q_f16(output0_tm_5);
float16x8_t _out0tm6 = vld1q_f16(output0_tm_6);
float16x8_t _out0tm7 = vld1q_f16(output0_tm_7);
float16x8_t _tmp024a = vaddq_f16(_out0tm1, _out0tm2);
float16x8_t _tmp135a = vsubq_f16(_out0tm1, _out0tm2);
// float tmp024a = output0_tm[1] + output0_tm[2];
// float tmp135a = output0_tm[1] - output0_tm[2];
float16x8_t _tmp024b = vaddq_f16(_out0tm3, _out0tm4);
float16x8_t _tmp135b = vsubq_f16(_out0tm3, _out0tm4);
// float tmp024b = output0_tm[3] + output0_tm[4];
// float tmp135b = output0_tm[3] - output0_tm[4];
float16x8_t _tmp024c = vaddq_f16(_out0tm5, _out0tm6);
float16x8_t _tmp135c = vsubq_f16(_out0tm5, _out0tm6);
// float tmp024c = output0_tm[5] + output0_tm[6];
// float tmp135c = output0_tm[5] - output0_tm[6];
float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f));
float16x8_t _tmp2m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
float16x8_t _tmp4m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
vst1q_f16(tmp[0][m], _tmp0m);
vst1q_f16(tmp[2][m], _tmp2m);
vst1q_f16(tmp[4][m], _tmp4m);
// tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
// tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
// tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c * 2;
float16x8_t _tmp1m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
float16x8_t _tmp3m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
float16x8_t _tmp5m = vaddq_f16(vaddq_f16(_out0tm7, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f));
vst1q_f16(tmp[1][m], _tmp1m);
vst1q_f16(tmp[3][m], _tmp3m);
vst1q_f16(tmp[5][m], _tmp5m);
// tmp[1][m] = tmp135a + tmp135b * 2 + tmp135c * 16;
// tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
// tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 64;
output0_tm_1 += tiles * 64;
output0_tm_2 += tiles * 64;
output0_tm_3 += tiles * 64;
output0_tm_4 += tiles * 64;
output0_tm_5 += tiles * 64;
output0_tm_6 += tiles * 64;
output0_tm_7 += tiles * 64;
}
for (int m = 0; m < 6; m++)
{
float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);
float16x8_t _tmp024a = vaddq_f16(_tmp01, _tmp02);
float16x8_t _tmp135a = vsubq_f16(_tmp01, _tmp02);
// float tmp024a = tmp0[1] + tmp0[2];
// float tmp135a = tmp0[1] - tmp0[2];
float16x8_t _tmp024b = vaddq_f16(_tmp03, _tmp04);
float16x8_t _tmp135b = vsubq_f16(_tmp03, _tmp04);
// float tmp024b = tmp0[3] + tmp0[4];
// float tmp135b = tmp0[3] - tmp0[4];
float16x8_t _tmp024c = vaddq_f16(_tmp05, _tmp06);
float16x8_t _tmp135c = vsubq_f16(_tmp05, _tmp06);
// float tmp024c = tmp0[5] + tmp0[6];
// float tmp135c = tmp0[5] - tmp0[6];
float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f)));
float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
float16x8_t _out04 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
vst1q_f16(output0, _out00);
vst1q_f16(output0 + 16, _out02);
vst1q_f16(output0 + 32, _out04);
// output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
// output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
// output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c * 2;
float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
float16x8_t _out05 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp07, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f)));
vst1q_f16(output0 + 8, _out01);
vst1q_f16(output0 + 24, _out03);
vst1q_f16(output0 + 40, _out05);
// output0[1] = bias0 + tmp135a + tmp135b * 2 + tmp135c * 16;
// output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
// output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw * 8;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd42_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch)
{
// winograd42 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 8b-8a-inch/8a-36-outch/8b
kernel_tm_pack8.create(inch / 8, 36, outch / 8, (size_t)2u * 64, 64);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack8.channel(q / 8);
for (int k = 0; k < 36; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
const float* k00 = k0.row(p + i);
const float* k10 = k1.row(p + i);
const float* k20 = k2.row(p + i);
const float* k30 = k3.row(p + i);
const float* k40 = k4.row(p + i);
const float* k50 = k5.row(p + i);
const float* k60 = k6.row(p + i);
const float* k70 = k7.row(p + i);
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00 += 8;
}
}
}
}
}
// Winograd F(4x4,3x3) convolution, stride 1, pack8 fp16 storage-aware path.
// Pipeline: pad input -> input transform (B^T d B) -> tile permute ->
// batched dot with pre-transformed kernels -> output transform (A^T m A) + bias -> crop.
// Assumes elempack == 8 and kernel_tm produced by
// conv3x3s1_winograd42_transform_kernel_pack8_fp16sa_neon.
static void conv3x3s1_winograd42_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
// (each 6x6 input tile produces a 4x4 output tile; neighbouring tiles overlap by 2)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const __fp16* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// scratch for one 6x6 tile of 8-wide fp16 vectors (B^T d applied, then (.)B)
__fp16 tmp[6][6][8];
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * 8;
// first pass: transform the 6 rows of the tile (B^T d)
for (int m = 0; m < 6; m++)
{
float16x8_t _r00 = vld1q_f16(r0);
float16x8_t _r01 = vld1q_f16(r0 + 8);
float16x8_t _r02 = vld1q_f16(r0 + 16);
float16x8_t _r03 = vld1q_f16(r0 + 24);
float16x8_t _r04 = vld1q_f16(r0 + 32);
float16x8_t _r05 = vld1q_f16(r0 + 40);
float16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f);
float16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f);
float16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f);
float16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
float16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
float16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f);
vst1q_f16(tmp[0][m], _tmp0m);
vst1q_f16(tmp[1][m], _tmp1m);
vst1q_f16(tmp[2][m], _tmp2m);
vst1q_f16(tmp[3][m], _tmp3m);
vst1q_f16(tmp[4][m], _tmp4m);
vst1q_f16(tmp[5][m], _tmp5m);
r0 += w * 8;
}
// the 6 output rows of one tile are spread across the tm blob at stride tiles*8
__fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 6 + j) * 8;
__fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
__fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
__fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
__fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
__fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
// second pass: transform the 6 columns ((B^T d) B) and scatter into bottom_blob_tm
for (int m = 0; m < 6; m++)
{
float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
float16x8_t _r0tm0 = vfmsq_n_f16(vfmaq_n_f16(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
float16x8_t _r0tm1 = vfmsq_n_f16(vaddq_f16(_tmp04, _tmp03), vaddq_f16(_tmp01, _tmp02), 4.f);
float16x8_t _r0tm2 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp03), vsubq_f16(_tmp01, _tmp02), 4.f);
float16x8_t _r0tm3 = vfmsq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f);
float16x8_t _r0tm4 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f);
float16x8_t _r0tm5 = vfmsq_n_f16(vfmaq_n_f16(_tmp05, _tmp01, 4.f), _tmp03, 5.f);
vst1q_f16(r0_tm_0, _r0tm0);
vst1q_f16(r0_tm_1, _r0tm1);
vst1q_f16(r0_tm_2, _r0tm2);
vst1q_f16(r0_tm_3, _r0tm3);
vst1q_f16(r0_tm_4, _r0tm4);
vst1q_f16(r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 48;
r0_tm_1 += tiles * 48;
r0_tm_2 += tiles * 48;
r0_tm_3 += tiles * 48;
r0_tm_4 += tiles * 48;
r0_tm_5 += tiles * 48;
}
}
}
}
}
bottom_blob_bordered = Mat();  // release padded input early
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// reorder tiles into contiguous batches of 12/8/4/2/1 so the GEMM
// micro-kernels below read their operands sequentially
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
__fp16* tm2p = tm2.row<__fp16>(i / 12);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 12x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n"
"ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n"
"sub %0, %0, #128 \n"
"uzp1 v20.8h, v0.8h, v4.8h \n" // 0
"uzp1 v21.8h, v16.8h, v1.8h \n" // 1
"uzp1 v22.8h, v5.8h, v17.8h \n" // 2
"uzp1 v23.8h, v2.8h, v6.8h \n" // 3
"uzp1 v24.8h, v18.8h, v3.8h \n" // 4
"uzp1 v25.8h, v7.8h, v19.8h \n" // 5
"uzp2 v26.8h, v0.8h, v4.8h \n" // 6
"uzp2 v27.8h, v16.8h, v1.8h \n" // 7
"uzp2 v28.8h, v5.8h, v17.8h \n" // 8
"uzp2 v29.8h, v2.8h, v6.8h \n" // 9
"uzp2 v30.8h, v18.8h, v3.8h \n" // 10
"uzp2 v31.8h, v7.8h, v19.8h \n" // 11
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 7 < tiles; i += 8)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 3 < tiles; i += 4)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// 4 tiles: straight copy, no transpose needed
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 1 < tiles; i += 2)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h}, [%0] \n"
"st1 {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0");
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
// batched GEMM: for each of the 36 winograd frequencies, multiply the
// permuted input tiles by the pre-transformed kernel
top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
// 12-tile micro-kernel: accumulators v20-v31, inputs v0-v11, weights v12-v19
for (; i + 11 < tiles; i += 12)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123
"fmla v20.8h, v12.8h, v0.h[0] \n"
"fmla v21.8h, v12.8h, v0.h[1] \n"
"fmla v22.8h, v12.8h, v0.h[2] \n"
"fmla v23.8h, v12.8h, v0.h[3] \n"
"fmla v24.8h, v12.8h, v0.h[4] \n"
"fmla v25.8h, v12.8h, v0.h[5] \n"
"fmla v26.8h, v12.8h, v0.h[6] \n"
"fmla v27.8h, v12.8h, v0.h[7] \n"
"fmla v28.8h, v12.8h, v1.h[0] \n"
"fmla v29.8h, v12.8h, v1.h[1] \n"
"fmla v30.8h, v12.8h, v1.h[2] \n"
"fmla v31.8h, v12.8h, v1.h[3] \n"
"fmla v20.8h, v13.8h, v1.h[4] \n"
"fmla v21.8h, v13.8h, v1.h[5] \n"
"fmla v22.8h, v13.8h, v1.h[6] \n"
"fmla v23.8h, v13.8h, v1.h[7] \n"
"fmla v24.8h, v13.8h, v2.h[0] \n"
"fmla v25.8h, v13.8h, v2.h[1] \n"
"fmla v26.8h, v13.8h, v2.h[2] \n"
"fmla v27.8h, v13.8h, v2.h[3] \n"
"fmla v28.8h, v13.8h, v2.h[4] \n"
"fmla v29.8h, v13.8h, v2.h[5] \n"
"fmla v30.8h, v13.8h, v2.h[6] \n"
"fmla v31.8h, v13.8h, v2.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v20.8h, v14.8h, v3.h[0] \n"
"fmla v21.8h, v14.8h, v3.h[1] \n"
"fmla v22.8h, v14.8h, v3.h[2] \n"
"fmla v23.8h, v14.8h, v3.h[3] \n"
"fmla v24.8h, v14.8h, v3.h[4] \n"
"fmla v25.8h, v14.8h, v3.h[5] \n"
"fmla v26.8h, v14.8h, v3.h[6] \n"
"fmla v27.8h, v14.8h, v3.h[7] \n"
"fmla v28.8h, v14.8h, v4.h[0] \n"
"fmla v29.8h, v14.8h, v4.h[1] \n"
"fmla v30.8h, v14.8h, v4.h[2] \n"
"fmla v31.8h, v14.8h, v4.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567
"fmla v20.8h, v15.8h, v4.h[4] \n"
"fmla v21.8h, v15.8h, v4.h[5] \n"
"fmla v22.8h, v15.8h, v4.h[6] \n"
"fmla v23.8h, v15.8h, v4.h[7] \n"
"fmla v24.8h, v15.8h, v5.h[0] \n"
"fmla v25.8h, v15.8h, v5.h[1] \n"
"fmla v26.8h, v15.8h, v5.h[2] \n"
"fmla v27.8h, v15.8h, v5.h[3] \n"
"fmla v28.8h, v15.8h, v5.h[4] \n"
"fmla v29.8h, v15.8h, v5.h[5] \n"
"fmla v30.8h, v15.8h, v5.h[6] \n"
"fmla v31.8h, v15.8h, v5.h[7] \n"
"fmla v20.8h, v16.8h, v6.h[0] \n"
"fmla v21.8h, v16.8h, v6.h[1] \n"
"fmla v22.8h, v16.8h, v6.h[2] \n"
"fmla v23.8h, v16.8h, v6.h[3] \n"
"fmla v24.8h, v16.8h, v6.h[4] \n"
"fmla v25.8h, v16.8h, v6.h[5] \n"
"fmla v26.8h, v16.8h, v6.h[6] \n"
"fmla v27.8h, v16.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v7.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[1] \n"
"fmla v30.8h, v16.8h, v7.h[2] \n"
"fmla v31.8h, v16.8h, v7.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011
"fmla v20.8h, v17.8h, v7.h[4] \n"
"fmla v21.8h, v17.8h, v7.h[5] \n"
"fmla v22.8h, v17.8h, v7.h[6] \n"
"fmla v23.8h, v17.8h, v7.h[7] \n"
"fmla v24.8h, v17.8h, v8.h[0] \n"
"fmla v25.8h, v17.8h, v8.h[1] \n"
"fmla v26.8h, v17.8h, v8.h[2] \n"
"fmla v27.8h, v17.8h, v8.h[3] \n"
"fmla v28.8h, v17.8h, v8.h[4] \n"
"fmla v29.8h, v17.8h, v8.h[5] \n"
"fmla v30.8h, v17.8h, v8.h[6] \n"
"fmla v31.8h, v17.8h, v8.h[7] \n"
"fmla v20.8h, v18.8h, v9.h[0] \n"
"fmla v21.8h, v18.8h, v9.h[1] \n"
"fmla v22.8h, v18.8h, v9.h[2] \n"
"fmla v23.8h, v18.8h, v9.h[3] \n"
"fmla v24.8h, v18.8h, v9.h[4] \n"
"fmla v25.8h, v18.8h, v9.h[5] \n"
"fmla v26.8h, v18.8h, v9.h[6] \n"
"fmla v27.8h, v18.8h, v9.h[7] \n"
"fmla v28.8h, v18.8h, v10.h[0] \n"
"fmla v29.8h, v18.8h, v10.h[1] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v10.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.8h, v19.8h, v10.h[4] \n"
"fmla v21.8h, v19.8h, v10.h[5] \n"
"fmla v22.8h, v19.8h, v10.h[6] \n"
"fmla v23.8h, v19.8h, v10.h[7] \n"
"fmla v24.8h, v19.8h, v11.h[0] \n"
"fmla v25.8h, v19.8h, v11.h[1] \n"
"fmla v26.8h, v19.8h, v11.h[2] \n"
"fmla v27.8h, v19.8h, v11.h[3] \n"
"fmla v28.8h, v19.8h, v11.h[4] \n"
"fmla v29.8h, v19.8h, v11.h[5] \n"
"fmla v30.8h, v19.8h, v11.h[6] \n"
"fmla v31.8h, v19.8h, v11.h[7] \n"
"bne 0b \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
// 8-tile micro-kernel: accumulators v16-v23
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v0.h[1] \n"
"fmla v18.8h, v8.8h, v0.h[2] \n"
"fmla v19.8h, v8.8h, v0.h[3] \n"
"fmla v20.8h, v8.8h, v0.h[4] \n"
"fmla v21.8h, v8.8h, v0.h[5] \n"
"fmla v22.8h, v8.8h, v0.h[6] \n"
"fmla v23.8h, v8.8h, v0.h[7] \n"
"fmla v16.8h, v9.8h, v1.h[0] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v1.h[2] \n"
"fmla v19.8h, v9.8h, v1.h[3] \n"
"fmla v20.8h, v9.8h, v1.h[4] \n"
"fmla v21.8h, v9.8h, v1.h[5] \n"
"fmla v22.8h, v9.8h, v1.h[6] \n"
"fmla v23.8h, v9.8h, v1.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v16.8h, v10.8h, v2.h[0] \n"
"fmla v17.8h, v10.8h, v2.h[1] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v2.h[3] \n"
"fmla v20.8h, v10.8h, v2.h[4] \n"
"fmla v21.8h, v10.8h, v2.h[5] \n"
"fmla v22.8h, v10.8h, v2.h[6] \n"
"fmla v23.8h, v10.8h, v2.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v11.8h, v3.h[0] \n"
"fmla v17.8h, v11.8h, v3.h[1] \n"
"fmla v18.8h, v11.8h, v3.h[2] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v20.8h, v11.8h, v3.h[4] \n"
"fmla v21.8h, v11.8h, v3.h[5] \n"
"fmla v22.8h, v11.8h, v3.h[6] \n"
"fmla v23.8h, v11.8h, v3.h[7] \n"
"fmla v16.8h, v12.8h, v4.h[0] \n"
"fmla v17.8h, v12.8h, v4.h[1] \n"
"fmla v18.8h, v12.8h, v4.h[2] \n"
"fmla v19.8h, v12.8h, v4.h[3] \n"
"fmla v20.8h, v12.8h, v4.h[4] \n"
"fmla v21.8h, v12.8h, v4.h[5] \n"
"fmla v22.8h, v12.8h, v4.h[6] \n"
"fmla v23.8h, v12.8h, v4.h[7] \n"
"fmla v16.8h, v13.8h, v5.h[0] \n"
"fmla v17.8h, v13.8h, v5.h[1] \n"
"fmla v18.8h, v13.8h, v5.h[2] \n"
"fmla v19.8h, v13.8h, v5.h[3] \n"
"fmla v20.8h, v13.8h, v5.h[4] \n"
"fmla v21.8h, v13.8h, v5.h[5] \n"
"fmla v22.8h, v13.8h, v5.h[6] \n"
"fmla v23.8h, v13.8h, v5.h[7] \n"
"fmla v16.8h, v14.8h, v6.h[0] \n"
"fmla v17.8h, v14.8h, v6.h[1] \n"
"fmla v18.8h, v14.8h, v6.h[2] \n"
"fmla v19.8h, v14.8h, v6.h[3] \n"
"fmla v20.8h, v14.8h, v6.h[4] \n"
"fmla v21.8h, v14.8h, v6.h[5] \n"
"fmla v22.8h, v14.8h, v6.h[6] \n"
"fmla v23.8h, v14.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v7.h[0] \n"
"fmla v17.8h, v15.8h, v7.h[1] \n"
"fmla v18.8h, v15.8h, v7.h[2] \n"
"fmla v19.8h, v15.8h, v7.h[3] \n"
"fmla v20.8h, v15.8h, v7.h[4] \n"
"fmla v21.8h, v15.8h, v7.h[5] \n"
"fmla v22.8h, v15.8h, v7.h[6] \n"
"fmla v23.8h, v15.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
// 4-tile micro-kernel
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v18.8h, v8.8h, v2.h[0] \n"
"fmla v19.8h, v8.8h, v3.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v2.h[1] \n"
"fmla v19.8h, v9.8h, v3.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v3.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v18.8h, v11.8h, v2.h[3] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v18.8h, v12.8h, v2.h[4] \n"
"fmla v19.8h, v12.8h, v3.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v18.8h, v13.8h, v2.h[5] \n"
"fmla v19.8h, v13.8h, v3.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"fmla v18.8h, v14.8h, v2.h[6] \n"
"fmla v19.8h, v14.8h, v3.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"fmla v18.8h, v15.8h, v2.h[7] \n"
"fmla v19.8h, v15.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
// 2-tile micro-kernel
for (; i + 1 < tiles; i += 2)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
// single-tile micro-kernel
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.8h}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"bne 0b \n"
"st1 {v16.8h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
}
}
}
}
// NOTE(review): bottom_blob_tm was already released inside the dot scope above;
// this second release is redundant but harmless.
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
// write into top_blob directly when no padding was added, otherwise into a
// bordered scratch blob that gets cropped at the end
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
// bias is pack8: 8 consecutive fp16 values per output channel group
_float16x8_t_bias_note_removed_
__fp16 tmp[4][6][8];
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, elemsize, elempack);
const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 6 + j) * 8;
const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8;
const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16;
const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24;
const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32;
const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40;
__fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * 8;
// TODO neon optimize
// first pass: reduce 6 tile rows to 4 (A^T m)
for (int m = 0; m < 6; m++)
{
float16x8_t _out0tm0 = vld1q_f16(output0_tm_0);
float16x8_t _out0tm1 = vld1q_f16(output0_tm_1);
float16x8_t _out0tm2 = vld1q_f16(output0_tm_2);
float16x8_t _out0tm3 = vld1q_f16(output0_tm_3);
float16x8_t _out0tm4 = vld1q_f16(output0_tm_4);
float16x8_t _out0tm5 = vld1q_f16(output0_tm_5);
float16x8_t _tmp02a = vaddq_f16(_out0tm1, _out0tm2);
float16x8_t _tmp13a = vsubq_f16(_out0tm1, _out0tm2);
float16x8_t _tmp02b = vaddq_f16(_out0tm3, _out0tm4);
float16x8_t _tmp13b = vsubq_f16(_out0tm3, _out0tm4);
float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp02a), _tmp02b);
float16x8_t _tmp1m = vfmaq_n_f16(_tmp13a, _tmp13b, 2.f);
float16x8_t _tmp2m = vfmaq_n_f16(_tmp02a, _tmp02b, 4.f);
float16x8_t _tmp3m = vfmaq_n_f16(vaddq_f16(_out0tm5, _tmp13a), _tmp13b, 8.f);
vst1q_f16(tmp[0][m], _tmp0m);
vst1q_f16(tmp[1][m], _tmp1m);
vst1q_f16(tmp[2][m], _tmp2m);
vst1q_f16(tmp[3][m], _tmp3m);
output0_tm_0 += tiles * 48;
output0_tm_1 += tiles * 48;
output0_tm_2 += tiles * 48;
output0_tm_3 += tiles * 48;
output0_tm_4 += tiles * 48;
output0_tm_5 += tiles * 48;
}
// second pass: reduce 6 columns to 4 ((A^T m) A), add bias, store 4x4 output tile
for (int m = 0; m < 4; m++)
{
float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
float16x8_t _tmp02a = vaddq_f16(_tmp01, _tmp02);
float16x8_t _tmp13a = vsubq_f16(_tmp01, _tmp02);
float16x8_t _tmp02b = vaddq_f16(_tmp03, _tmp04);
float16x8_t _tmp13b = vsubq_f16(_tmp03, _tmp04);
float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp02a), _tmp02b));
float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp13a, _tmp13b, 2.f));
float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp02a, _tmp02b, 4.f));
float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vaddq_f16(_tmp05, _tmp13a), _tmp13b, 8.f));
vst1q_f16(output0, _out00);
vst1q_f16(output0 + 8, _out01);
vst1q_f16(output0 + 16, _out02);
vst1q_f16(output0 + 24, _out03);
output0 += outw * 8;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// 3x3 stride-1 convolution, pack8 fp16 storage with fp16 arithmetic (fp16sa),
// AArch64 NEON inline assembly.
//
// Data layout (inferred from the indexing below — each "pixel" is a group of
// 8 consecutive __fp16 lanes, so one pixel is 16 bytes): for every output
// channel p and input channel q, `kernel.channel(p).row(q)` holds the
// 3*3 taps x 8 input lanes x 8 output lanes = 9 * 128 = 1152 bytes of weights,
// consumed tap-by-tap as 8h vectors.
//
// The inner loops emit 4, then 2, then 1 output pixel(s) per asm block; each
// block reloads the full weight set for the (p, q) pair and rewinds kptr at
// the end so the next j iteration starts from the same weights.
static void conv3x3s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
// Seed the whole output channel with the bias (or zero); every input
// channel's contribution is then accumulated on top via the read-modify-write
// of the "sum" vectors inside the asm blocks.
float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
__fp16* outptr0 = out0.row<__fp16>(0);
const Mat img0 = bottom_blob.channel(q);
// Three consecutive input rows feeding one output row (3x3 window, stride 1).
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// Main body: 4 output pixels per iteration. v28-v31 carry the four
// running sums; weight vectors ping-pong between v16-v19 and v20-v23
// so loads overlap with the fmla chains.
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512]       \n"
"ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #512]       \n"
"ld1    {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #256]       \n"
"ld1    {v4.8h, v5.8h}, [%1]      \n" // r04 r05
"fmla   v28.8h, v16.8h, v0.h[0]   \n"
"fmla   v29.8h, v16.8h, v1.h[0]   \n"
"fmla   v30.8h, v16.8h, v2.h[0]   \n"
"fmla   v31.8h, v16.8h, v3.h[0]   \n"
"fmla   v28.8h, v17.8h, v0.h[1]   \n"
"fmla   v29.8h, v17.8h, v1.h[1]   \n"
"fmla   v30.8h, v17.8h, v2.h[1]   \n"
"fmla   v31.8h, v17.8h, v3.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v0.h[2]   \n"
"fmla   v29.8h, v18.8h, v1.h[2]   \n"
"fmla   v30.8h, v18.8h, v2.h[2]   \n"
"fmla   v31.8h, v18.8h, v3.h[2]   \n"
"fmla   v28.8h, v19.8h, v0.h[3]   \n"
"fmla   v29.8h, v19.8h, v1.h[3]   \n"
"fmla   v30.8h, v19.8h, v2.h[3]   \n"
"fmla   v31.8h, v19.8h, v3.h[3]   \n"
"fmla   v28.8h, v20.8h, v0.h[4]   \n"
"fmla   v29.8h, v20.8h, v1.h[4]   \n"
"fmla   v30.8h, v20.8h, v2.h[4]   \n"
"fmla   v31.8h, v20.8h, v3.h[4]   \n"
"fmla   v28.8h, v21.8h, v0.h[5]   \n"
"fmla   v29.8h, v21.8h, v1.h[5]   \n"
"fmla   v30.8h, v21.8h, v2.h[5]   \n"
"fmla   v31.8h, v21.8h, v3.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v0.h[6]   \n"
"fmla   v29.8h, v22.8h, v1.h[6]   \n"
"fmla   v30.8h, v22.8h, v2.h[6]   \n"
"fmla   v31.8h, v22.8h, v3.h[6]   \n"
"fmla   v28.8h, v23.8h, v0.h[7]   \n"
"fmla   v29.8h, v23.8h, v1.h[7]   \n"
"fmla   v30.8h, v23.8h, v2.h[7]   \n"
"fmla   v31.8h, v23.8h, v3.h[7]   \n"
"fmla   v28.8h, v16.8h, v1.h[0]   \n"
"fmla   v29.8h, v16.8h, v2.h[0]   \n"
"fmla   v30.8h, v16.8h, v3.h[0]   \n"
"fmla   v31.8h, v16.8h, v4.h[0]   \n"
"fmla   v28.8h, v17.8h, v1.h[1]   \n"
"fmla   v29.8h, v17.8h, v2.h[1]   \n"
"fmla   v30.8h, v17.8h, v3.h[1]   \n"
"fmla   v31.8h, v17.8h, v4.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v1.h[2]   \n"
"fmla   v29.8h, v18.8h, v2.h[2]   \n"
"fmla   v30.8h, v18.8h, v3.h[2]   \n"
"fmla   v31.8h, v18.8h, v4.h[2]   \n"
"fmla   v28.8h, v19.8h, v1.h[3]   \n"
"fmla   v29.8h, v19.8h, v2.h[3]   \n"
"fmla   v30.8h, v19.8h, v3.h[3]   \n"
"fmla   v31.8h, v19.8h, v4.h[3]   \n"
"fmla   v28.8h, v20.8h, v1.h[4]   \n"
"fmla   v29.8h, v20.8h, v2.h[4]   \n"
"fmla   v30.8h, v20.8h, v3.h[4]   \n"
"fmla   v31.8h, v20.8h, v4.h[4]   \n"
"fmla   v28.8h, v21.8h, v1.h[5]   \n"
"fmla   v29.8h, v21.8h, v2.h[5]   \n"
"fmla   v30.8h, v21.8h, v3.h[5]   \n"
"fmla   v31.8h, v21.8h, v4.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v1.h[6]   \n"
"fmla   v29.8h, v22.8h, v2.h[6]   \n"
"fmla   v30.8h, v22.8h, v3.h[6]   \n"
"fmla   v31.8h, v22.8h, v4.h[6]   \n"
"fmla   v28.8h, v23.8h, v1.h[7]   \n"
"fmla   v29.8h, v23.8h, v2.h[7]   \n"
"fmla   v30.8h, v23.8h, v3.h[7]   \n"
"fmla   v31.8h, v23.8h, v4.h[7]   \n"
"fmla   v28.8h, v16.8h, v2.h[0]   \n"
"fmla   v29.8h, v16.8h, v3.h[0]   \n"
"fmla   v30.8h, v16.8h, v4.h[0]   \n"
"fmla   v31.8h, v16.8h, v5.h[0]   \n"
"fmla   v28.8h, v17.8h, v2.h[1]   \n"
"fmla   v29.8h, v17.8h, v3.h[1]   \n"
"fmla   v30.8h, v17.8h, v4.h[1]   \n"
"fmla   v31.8h, v17.8h, v5.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v2.h[2]   \n"
"fmla   v29.8h, v18.8h, v3.h[2]   \n"
"fmla   v30.8h, v18.8h, v4.h[2]   \n"
"fmla   v31.8h, v18.8h, v5.h[2]   \n"
"fmla   v28.8h, v19.8h, v2.h[3]   \n"
"fmla   v29.8h, v19.8h, v3.h[3]   \n"
"fmla   v30.8h, v19.8h, v4.h[3]   \n"
"fmla   v31.8h, v19.8h, v5.h[3]   \n"
"prfm pldl1keep, [%2, #512]       \n"
"ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla   v28.8h, v20.8h, v2.h[4]   \n"
"fmla   v29.8h, v20.8h, v3.h[4]   \n"
"fmla   v30.8h, v20.8h, v4.h[4]   \n"
"fmla   v31.8h, v20.8h, v5.h[4]   \n"
"fmla   v28.8h, v21.8h, v2.h[5]   \n"
"fmla   v29.8h, v21.8h, v3.h[5]   \n"
"fmla   v30.8h, v21.8h, v4.h[5]   \n"
"fmla   v31.8h, v21.8h, v5.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v2.h[6]   \n"
"fmla   v29.8h, v22.8h, v3.h[6]   \n"
"fmla   v30.8h, v22.8h, v4.h[6]   \n"
"fmla   v31.8h, v22.8h, v5.h[6]   \n"
"fmla   v28.8h, v23.8h, v2.h[7]   \n"
"fmla   v29.8h, v23.8h, v3.h[7]   \n"
"fmla   v30.8h, v23.8h, v4.h[7]   \n"
"fmla   v31.8h, v23.8h, v5.h[7]   \n"
"prfm pldl1keep, [%2, #256]       \n"
"ld1    {v12.8h, v13.8h}, [%2]    \n" // r14 r15
"fmla   v28.8h, v16.8h, v8.h[0]   \n"
"fmla   v29.8h, v16.8h, v9.h[0]   \n"
"fmla   v30.8h, v16.8h, v10.h[0]  \n"
"fmla   v31.8h, v16.8h, v11.h[0]  \n"
"fmla   v28.8h, v17.8h, v8.h[1]   \n"
"fmla   v29.8h, v17.8h, v9.h[1]   \n"
"fmla   v30.8h, v17.8h, v10.h[1]  \n"
"fmla   v31.8h, v17.8h, v11.h[1]  \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v8.h[2]   \n"
"fmla   v29.8h, v18.8h, v9.h[2]   \n"
"fmla   v30.8h, v18.8h, v10.h[2]  \n"
"fmla   v31.8h, v18.8h, v11.h[2]  \n"
"fmla   v28.8h, v19.8h, v8.h[3]   \n"
"fmla   v29.8h, v19.8h, v9.h[3]   \n"
"fmla   v30.8h, v19.8h, v10.h[3]  \n"
"fmla   v31.8h, v19.8h, v11.h[3]  \n"
"fmla   v28.8h, v20.8h, v8.h[4]   \n"
"fmla   v29.8h, v20.8h, v9.h[4]   \n"
"fmla   v30.8h, v20.8h, v10.h[4]  \n"
"fmla   v31.8h, v20.8h, v11.h[4]  \n"
"fmla   v28.8h, v21.8h, v8.h[5]   \n"
"fmla   v29.8h, v21.8h, v9.h[5]   \n"
"fmla   v30.8h, v21.8h, v10.h[5]  \n"
"fmla   v31.8h, v21.8h, v11.h[5]  \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v8.h[6]   \n"
"fmla   v29.8h, v22.8h, v9.h[6]   \n"
"fmla   v30.8h, v22.8h, v10.h[6]  \n"
"fmla   v31.8h, v22.8h, v11.h[6]  \n"
"fmla   v28.8h, v23.8h, v8.h[7]   \n"
"fmla   v29.8h, v23.8h, v9.h[7]   \n"
"fmla   v30.8h, v23.8h, v10.h[7]  \n"
"fmla   v31.8h, v23.8h, v11.h[7]  \n"
"fmla   v28.8h, v16.8h, v9.h[0]   \n"
"fmla   v29.8h, v16.8h, v10.h[0]  \n"
"fmla   v30.8h, v16.8h, v11.h[0]  \n"
"fmla   v31.8h, v16.8h, v12.h[0]  \n"
"fmla   v28.8h, v17.8h, v9.h[1]   \n"
"fmla   v29.8h, v17.8h, v10.h[1]  \n"
"fmla   v30.8h, v17.8h, v11.h[1]  \n"
"fmla   v31.8h, v17.8h, v12.h[1]  \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v9.h[2]   \n"
"fmla   v29.8h, v18.8h, v10.h[2]  \n"
"fmla   v30.8h, v18.8h, v11.h[2]  \n"
"fmla   v31.8h, v18.8h, v12.h[2]  \n"
"fmla   v28.8h, v19.8h, v9.h[3]   \n"
"fmla   v29.8h, v19.8h, v10.h[3]  \n"
"fmla   v30.8h, v19.8h, v11.h[3]  \n"
"fmla   v31.8h, v19.8h, v12.h[3]  \n"
"fmla   v28.8h, v20.8h, v9.h[4]   \n"
"fmla   v29.8h, v20.8h, v10.h[4]  \n"
"fmla   v30.8h, v20.8h, v11.h[4]  \n"
"fmla   v31.8h, v20.8h, v12.h[4]  \n"
"fmla   v28.8h, v21.8h, v9.h[5]   \n"
"fmla   v29.8h, v21.8h, v10.h[5]  \n"
"fmla   v30.8h, v21.8h, v11.h[5]  \n"
"fmla   v31.8h, v21.8h, v12.h[5]  \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v9.h[6]   \n"
"fmla   v29.8h, v22.8h, v10.h[6]  \n"
"fmla   v30.8h, v22.8h, v11.h[6]  \n"
"fmla   v31.8h, v22.8h, v12.h[6]  \n"
"fmla   v28.8h, v23.8h, v9.h[7]   \n"
"fmla   v29.8h, v23.8h, v10.h[7]  \n"
"fmla   v30.8h, v23.8h, v11.h[7]  \n"
"fmla   v31.8h, v23.8h, v12.h[7]  \n"
"fmla   v28.8h, v16.8h, v10.h[0]  \n"
"fmla   v29.8h, v16.8h, v11.h[0]  \n"
"fmla   v30.8h, v16.8h, v12.h[0]  \n"
"fmla   v31.8h, v16.8h, v13.h[0]  \n"
"fmla   v28.8h, v17.8h, v10.h[1]  \n"
"fmla   v29.8h, v17.8h, v11.h[1]  \n"
"fmla   v30.8h, v17.8h, v12.h[1]  \n"
"fmla   v31.8h, v17.8h, v13.h[1]  \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v10.h[2]  \n"
"fmla   v29.8h, v18.8h, v11.h[2]  \n"
"fmla   v30.8h, v18.8h, v12.h[2]  \n"
"fmla   v31.8h, v18.8h, v13.h[2]  \n"
"fmla   v28.8h, v19.8h, v10.h[3]  \n"
"fmla   v29.8h, v19.8h, v11.h[3]  \n"
"fmla   v30.8h, v19.8h, v12.h[3]  \n"
"fmla   v31.8h, v19.8h, v13.h[3]  \n"
"prfm pldl1keep, [%3, #512]       \n"
"ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla   v28.8h, v20.8h, v10.h[4]  \n"
"fmla   v29.8h, v20.8h, v11.h[4]  \n"
"fmla   v30.8h, v20.8h, v12.h[4]  \n"
"fmla   v31.8h, v20.8h, v13.h[4]  \n"
"fmla   v28.8h, v21.8h, v10.h[5]  \n"
"fmla   v29.8h, v21.8h, v11.h[5]  \n"
"fmla   v30.8h, v21.8h, v12.h[5]  \n"
"fmla   v31.8h, v21.8h, v13.h[5]  \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v10.h[6]  \n"
"fmla   v29.8h, v22.8h, v11.h[6]  \n"
"fmla   v30.8h, v22.8h, v12.h[6]  \n"
"fmla   v31.8h, v22.8h, v13.h[6]  \n"
"fmla   v28.8h, v23.8h, v10.h[7]  \n"
"fmla   v29.8h, v23.8h, v11.h[7]  \n"
"fmla   v30.8h, v23.8h, v12.h[7]  \n"
"fmla   v31.8h, v23.8h, v13.h[7]  \n"
"prfm pldl1keep, [%3, #256]       \n"
"ld1    {v4.8h, v5.8h}, [%3]      \n" // r24 r25
"fmla   v28.8h, v16.8h, v0.h[0]   \n"
"fmla   v29.8h, v16.8h, v1.h[0]   \n"
"fmla   v30.8h, v16.8h, v2.h[0]   \n"
"fmla   v31.8h, v16.8h, v3.h[0]   \n"
"fmla   v28.8h, v17.8h, v0.h[1]   \n"
"fmla   v29.8h, v17.8h, v1.h[1]   \n"
"fmla   v30.8h, v17.8h, v2.h[1]   \n"
"fmla   v31.8h, v17.8h, v3.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v0.h[2]   \n"
"fmla   v29.8h, v18.8h, v1.h[2]   \n"
"fmla   v30.8h, v18.8h, v2.h[2]   \n"
"fmla   v31.8h, v18.8h, v3.h[2]   \n"
"fmla   v28.8h, v19.8h, v0.h[3]   \n"
"fmla   v29.8h, v19.8h, v1.h[3]   \n"
"fmla   v30.8h, v19.8h, v2.h[3]   \n"
"fmla   v31.8h, v19.8h, v3.h[3]   \n"
"fmla   v28.8h, v20.8h, v0.h[4]   \n"
"fmla   v29.8h, v20.8h, v1.h[4]   \n"
"fmla   v30.8h, v20.8h, v2.h[4]   \n"
"fmla   v31.8h, v20.8h, v3.h[4]   \n"
"fmla   v28.8h, v21.8h, v0.h[5]   \n"
"fmla   v29.8h, v21.8h, v1.h[5]   \n"
"fmla   v30.8h, v21.8h, v2.h[5]   \n"
"fmla   v31.8h, v21.8h, v3.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v0.h[6]   \n"
"fmla   v29.8h, v22.8h, v1.h[6]   \n"
"fmla   v30.8h, v22.8h, v2.h[6]   \n"
"fmla   v31.8h, v22.8h, v3.h[6]   \n"
"fmla   v28.8h, v23.8h, v0.h[7]   \n"
"fmla   v29.8h, v23.8h, v1.h[7]   \n"
"fmla   v30.8h, v23.8h, v2.h[7]   \n"
"fmla   v31.8h, v23.8h, v3.h[7]   \n"
"fmla   v28.8h, v16.8h, v1.h[0]   \n"
"fmla   v29.8h, v16.8h, v2.h[0]   \n"
"fmla   v30.8h, v16.8h, v3.h[0]   \n"
"fmla   v31.8h, v16.8h, v4.h[0]   \n"
"fmla   v28.8h, v17.8h, v1.h[1]   \n"
"fmla   v29.8h, v17.8h, v2.h[1]   \n"
"fmla   v30.8h, v17.8h, v3.h[1]   \n"
"fmla   v31.8h, v17.8h, v4.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v1.h[2]   \n"
"fmla   v29.8h, v18.8h, v2.h[2]   \n"
"fmla   v30.8h, v18.8h, v3.h[2]   \n"
"fmla   v31.8h, v18.8h, v4.h[2]   \n"
"fmla   v28.8h, v19.8h, v1.h[3]   \n"
"fmla   v29.8h, v19.8h, v2.h[3]   \n"
"fmla   v30.8h, v19.8h, v3.h[3]   \n"
"fmla   v31.8h, v19.8h, v4.h[3]   \n"
"fmla   v28.8h, v20.8h, v1.h[4]   \n"
"fmla   v29.8h, v20.8h, v2.h[4]   \n"
"fmla   v30.8h, v20.8h, v3.h[4]   \n"
"fmla   v31.8h, v20.8h, v4.h[4]   \n"
"fmla   v28.8h, v21.8h, v1.h[5]   \n"
"fmla   v29.8h, v21.8h, v2.h[5]   \n"
"fmla   v30.8h, v21.8h, v3.h[5]   \n"
"fmla   v31.8h, v21.8h, v4.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v1.h[6]   \n"
"fmla   v29.8h, v22.8h, v2.h[6]   \n"
"fmla   v30.8h, v22.8h, v3.h[6]   \n"
"fmla   v31.8h, v22.8h, v4.h[6]   \n"
"fmla   v28.8h, v23.8h, v1.h[7]   \n"
"fmla   v29.8h, v23.8h, v2.h[7]   \n"
"fmla   v30.8h, v23.8h, v3.h[7]   \n"
"fmla   v31.8h, v23.8h, v4.h[7]   \n"
"fmla   v28.8h, v16.8h, v2.h[0]   \n"
"fmla   v29.8h, v16.8h, v3.h[0]   \n"
"fmla   v30.8h, v16.8h, v4.h[0]   \n"
"fmla   v31.8h, v16.8h, v5.h[0]   \n"
"fmla   v28.8h, v17.8h, v2.h[1]   \n"
"fmla   v29.8h, v17.8h, v3.h[1]   \n"
"fmla   v30.8h, v17.8h, v4.h[1]   \n"
"fmla   v31.8h, v17.8h, v5.h[1]   \n"
// Last weight load: no post-increment, so the rewind below only has to
// undo the 17 preceding 64-byte increments.
//                 "prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla   v28.8h, v18.8h, v2.h[2]   \n"
"fmla   v29.8h, v18.8h, v3.h[2]   \n"
"fmla   v30.8h, v18.8h, v4.h[2]   \n"
"fmla   v31.8h, v18.8h, v5.h[2]   \n"
"fmla   v28.8h, v19.8h, v2.h[3]   \n"
"fmla   v29.8h, v19.8h, v3.h[3]   \n"
"fmla   v30.8h, v19.8h, v4.h[3]   \n"
"fmla   v31.8h, v19.8h, v5.h[3]   \n"
"fmla   v28.8h, v20.8h, v2.h[4]   \n"
"fmla   v29.8h, v20.8h, v3.h[4]   \n"
"fmla   v30.8h, v20.8h, v4.h[4]   \n"
"fmla   v31.8h, v20.8h, v5.h[4]   \n"
"fmla   v28.8h, v21.8h, v2.h[5]   \n"
"fmla   v29.8h, v21.8h, v3.h[5]   \n"
"fmla   v30.8h, v21.8h, v4.h[5]   \n"
"fmla   v31.8h, v21.8h, v5.h[5]   \n"
"fmla   v28.8h, v22.8h, v2.h[6]   \n"
"fmla   v29.8h, v22.8h, v3.h[6]   \n"
"fmla   v30.8h, v22.8h, v4.h[6]   \n"
"fmla   v31.8h, v22.8h, v5.h[6]   \n"
"fmla   v28.8h, v23.8h, v2.h[7]   \n"
"fmla   v29.8h, v23.8h, v3.h[7]   \n"
"fmla   v30.8h, v23.8h, v4.h[7]   \n"
"fmla   v31.8h, v23.8h, v5.h[7]   \n"
"sub    %4, %4, #1088             \n" // rewind kptr by 17 * 64 = 1088 bytes, back to the start of this (p, q) weight block
"st1    {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2),      // %3
"=r"(kptr)     // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
// Tail: 2 output pixels. The bias-seeded sums are kept in v30/v31 while
// v28/v29 start from fmul; the two pairs are merged with fadd at the end.
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512]       \n"
"ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%1] \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #256]       \n"
"ld1    {v30.8h, v31.8h}, [%0]    \n" // sum0
"fmul   v28.8h, v16.8h, v0.h[0]   \n"
"fmul   v29.8h, v16.8h, v1.h[0]   \n"
"fmla   v30.8h, v17.8h, v0.h[1]   \n"
"fmla   v31.8h, v17.8h, v1.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v0.h[2]   \n"
"fmla   v29.8h, v18.8h, v1.h[2]   \n"
"fmla   v30.8h, v19.8h, v0.h[3]   \n"
"fmla   v31.8h, v19.8h, v1.h[3]   \n"
"fmla   v28.8h, v20.8h, v0.h[4]   \n"
"fmla   v29.8h, v20.8h, v1.h[4]   \n"
"fmla   v30.8h, v21.8h, v0.h[5]   \n"
"fmla   v31.8h, v21.8h, v1.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v0.h[6]   \n"
"fmla   v29.8h, v22.8h, v1.h[6]   \n"
"fmla   v30.8h, v23.8h, v0.h[7]   \n"
"fmla   v31.8h, v23.8h, v1.h[7]   \n"
"fmla   v28.8h, v16.8h, v1.h[0]   \n"
"fmla   v29.8h, v16.8h, v2.h[0]   \n"
"fmla   v30.8h, v17.8h, v1.h[1]   \n"
"fmla   v31.8h, v17.8h, v2.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v1.h[2]   \n"
"fmla   v29.8h, v18.8h, v2.h[2]   \n"
"fmla   v30.8h, v19.8h, v1.h[3]   \n"
"fmla   v31.8h, v19.8h, v2.h[3]   \n"
"fmla   v28.8h, v20.8h, v1.h[4]   \n"
"fmla   v29.8h, v20.8h, v2.h[4]   \n"
"fmla   v30.8h, v21.8h, v1.h[5]   \n"
"fmla   v31.8h, v21.8h, v2.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v1.h[6]   \n"
"fmla   v29.8h, v22.8h, v2.h[6]   \n"
"fmla   v30.8h, v23.8h, v1.h[7]   \n"
"fmla   v31.8h, v23.8h, v2.h[7]   \n"
"fmla   v28.8h, v16.8h, v2.h[0]   \n"
"fmla   v29.8h, v16.8h, v3.h[0]   \n"
"fmla   v30.8h, v17.8h, v2.h[1]   \n"
"fmla   v31.8h, v17.8h, v3.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v2.h[2]   \n"
"fmla   v29.8h, v18.8h, v3.h[2]   \n"
"fmla   v30.8h, v19.8h, v2.h[3]   \n"
"fmla   v31.8h, v19.8h, v3.h[3]   \n"
"prfm pldl1keep, [%2, #512]       \n"
"ld1    {v4.8h, v5.8h, v6.8h, v7.8h}, [%2] \n" // r10 r11 r12 r13
"fmla   v28.8h, v20.8h, v2.h[4]   \n"
"fmla   v29.8h, v20.8h, v3.h[4]   \n"
"fmla   v30.8h, v21.8h, v2.h[5]   \n"
"fmla   v31.8h, v21.8h, v3.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v2.h[6]   \n"
"fmla   v29.8h, v22.8h, v3.h[6]   \n"
"fmla   v30.8h, v23.8h, v2.h[7]   \n"
"fmla   v31.8h, v23.8h, v3.h[7]   \n"
"fmla   v28.8h, v16.8h, v4.h[0]   \n"
"fmla   v29.8h, v16.8h, v5.h[0]   \n"
"fmla   v30.8h, v17.8h, v4.h[1]   \n"
"fmla   v31.8h, v17.8h, v5.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v4.h[2]   \n"
"fmla   v29.8h, v18.8h, v5.h[2]   \n"
"fmla   v30.8h, v19.8h, v4.h[3]   \n"
"fmla   v31.8h, v19.8h, v5.h[3]   \n"
"fmla   v28.8h, v20.8h, v4.h[4]   \n"
"fmla   v29.8h, v20.8h, v5.h[4]   \n"
"fmla   v30.8h, v21.8h, v4.h[5]   \n"
"fmla   v31.8h, v21.8h, v5.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v4.h[6]   \n"
"fmla   v29.8h, v22.8h, v5.h[6]   \n"
"fmla   v30.8h, v23.8h, v4.h[7]   \n"
"fmla   v31.8h, v23.8h, v5.h[7]   \n"
"fmla   v28.8h, v16.8h, v5.h[0]   \n"
"fmla   v29.8h, v16.8h, v6.h[0]   \n"
"fmla   v30.8h, v17.8h, v5.h[1]   \n"
"fmla   v31.8h, v17.8h, v6.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v5.h[2]   \n"
"fmla   v29.8h, v18.8h, v6.h[2]   \n"
"fmla   v30.8h, v19.8h, v5.h[3]   \n"
"fmla   v31.8h, v19.8h, v6.h[3]   \n"
"fmla   v28.8h, v20.8h, v5.h[4]   \n"
"fmla   v29.8h, v20.8h, v6.h[4]   \n"
"fmla   v30.8h, v21.8h, v5.h[5]   \n"
"fmla   v31.8h, v21.8h, v6.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v5.h[6]   \n"
"fmla   v29.8h, v22.8h, v6.h[6]   \n"
"fmla   v30.8h, v23.8h, v5.h[7]   \n"
"fmla   v31.8h, v23.8h, v6.h[7]   \n"
"fmla   v28.8h, v16.8h, v6.h[0]   \n"
"fmla   v29.8h, v16.8h, v7.h[0]   \n"
"fmla   v30.8h, v17.8h, v6.h[1]   \n"
"fmla   v31.8h, v17.8h, v7.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v6.h[2]   \n"
"fmla   v29.8h, v18.8h, v7.h[2]   \n"
"fmla   v30.8h, v19.8h, v6.h[3]   \n"
"fmla   v31.8h, v19.8h, v7.h[3]   \n"
"prfm pldl1keep, [%3, #512]       \n"
"ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%3] \n" // r20 r21 r22 r23
"fmla   v28.8h, v20.8h, v6.h[4]   \n"
"fmla   v29.8h, v20.8h, v7.h[4]   \n"
"fmla   v30.8h, v21.8h, v6.h[5]   \n"
"fmla   v31.8h, v21.8h, v7.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v6.h[6]   \n"
"fmla   v29.8h, v22.8h, v7.h[6]   \n"
"fmla   v30.8h, v23.8h, v6.h[7]   \n"
"fmla   v31.8h, v23.8h, v7.h[7]   \n"
"fmla   v28.8h, v16.8h, v0.h[0]   \n"
"fmla   v29.8h, v16.8h, v1.h[0]   \n"
"fmla   v30.8h, v17.8h, v0.h[1]   \n"
"fmla   v31.8h, v17.8h, v1.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v0.h[2]   \n"
"fmla   v29.8h, v18.8h, v1.h[2]   \n"
"fmla   v30.8h, v19.8h, v0.h[3]   \n"
"fmla   v31.8h, v19.8h, v1.h[3]   \n"
"fmla   v28.8h, v20.8h, v0.h[4]   \n"
"fmla   v29.8h, v20.8h, v1.h[4]   \n"
"fmla   v30.8h, v21.8h, v0.h[5]   \n"
"fmla   v31.8h, v21.8h, v1.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v0.h[6]   \n"
"fmla   v29.8h, v22.8h, v1.h[6]   \n"
"fmla   v30.8h, v23.8h, v0.h[7]   \n"
"fmla   v31.8h, v23.8h, v1.h[7]   \n"
"fmla   v28.8h, v16.8h, v1.h[0]   \n"
"fmla   v29.8h, v16.8h, v2.h[0]   \n"
"fmla   v30.8h, v17.8h, v1.h[1]   \n"
"fmla   v31.8h, v17.8h, v2.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v28.8h, v18.8h, v1.h[2]   \n"
"fmla   v29.8h, v18.8h, v2.h[2]   \n"
"fmla   v30.8h, v19.8h, v1.h[3]   \n"
"fmla   v31.8h, v19.8h, v2.h[3]   \n"
"fmla   v28.8h, v20.8h, v1.h[4]   \n"
"fmla   v29.8h, v20.8h, v2.h[4]   \n"
"fmla   v30.8h, v21.8h, v1.h[5]   \n"
"fmla   v31.8h, v21.8h, v2.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v28.8h, v22.8h, v1.h[6]   \n"
"fmla   v29.8h, v22.8h, v2.h[6]   \n"
"fmla   v30.8h, v23.8h, v1.h[7]   \n"
"fmla   v31.8h, v23.8h, v2.h[7]   \n"
"fmla   v28.8h, v16.8h, v2.h[0]   \n"
"fmla   v29.8h, v16.8h, v3.h[0]   \n"
"fmla   v30.8h, v17.8h, v2.h[1]   \n"
"fmla   v31.8h, v17.8h, v3.h[1]   \n"
// Last weight load: no writeback (see the 4-pixel block above).
//                 "prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla   v28.8h, v18.8h, v2.h[2]   \n"
"fmla   v29.8h, v18.8h, v3.h[2]   \n"
"fmla   v30.8h, v19.8h, v2.h[3]   \n"
"fmla   v31.8h, v19.8h, v3.h[3]   \n"
"fmla   v28.8h, v20.8h, v2.h[4]   \n"
"fmla   v29.8h, v20.8h, v3.h[4]   \n"
"fmla   v30.8h, v21.8h, v2.h[5]   \n"
"fmla   v31.8h, v21.8h, v3.h[5]   \n"
"fmla   v28.8h, v22.8h, v2.h[6]   \n"
"fmla   v29.8h, v22.8h, v3.h[6]   \n"
"fmla   v30.8h, v23.8h, v2.h[7]   \n"
"fmla   v31.8h, v23.8h, v3.h[7]   \n"
"add    %1, %1, #32               \n"
"add    %2, %2, #32               \n"
"add    %3, %3, #32               \n"
"fadd   v28.8h, v28.8h, v30.8h    \n"
"fadd   v29.8h, v29.8h, v31.8h    \n"
"sub    %4, %4, #1088             \n" // rewind kptr by 17 * 64 = 1088 bytes, back to the start of this (p, q) weight block
"st1    {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2),      // %3
"=r"(kptr)     // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
// Tail: 1 output pixel. Four partial sums (v28-v31, v31 seeded with the
// accumulated output) are reduced with fadd before the store.
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #384]       \n"
"ld1    {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02
"prfm pldl1keep, [%0, #128]       \n"
"ld1    {v31.8h}, [%0]            \n" // sum0
"fmul   v28.8h, v16.8h, v0.h[0]   \n"
"fmul   v29.8h, v17.8h, v0.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmul   v30.8h, v18.8h, v0.h[2]   \n"
"fmla   v31.8h, v19.8h, v0.h[3]   \n"
"fmla   v28.8h, v20.8h, v0.h[4]   \n"
"fmla   v29.8h, v21.8h, v0.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v30.8h, v22.8h, v0.h[6]   \n"
"fmla   v31.8h, v23.8h, v0.h[7]   \n"
"fmla   v28.8h, v16.8h, v1.h[0]   \n"
"fmla   v29.8h, v17.8h, v1.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v30.8h, v18.8h, v1.h[2]   \n"
"fmla   v31.8h, v19.8h, v1.h[3]   \n"
"fmla   v28.8h, v20.8h, v1.h[4]   \n"
"fmla   v29.8h, v21.8h, v1.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v30.8h, v22.8h, v1.h[6]   \n"
"fmla   v31.8h, v23.8h, v1.h[7]   \n"
"fmla   v28.8h, v16.8h, v2.h[0]   \n"
"fmla   v29.8h, v17.8h, v2.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v30.8h, v18.8h, v2.h[2]   \n"
"fmla   v31.8h, v19.8h, v2.h[3]   \n"
"prfm pldl1keep, [%2, #384]       \n"
"ld1    {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12
"fmla   v28.8h, v20.8h, v2.h[4]   \n"
"fmla   v29.8h, v21.8h, v2.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v30.8h, v22.8h, v2.h[6]   \n"
"fmla   v31.8h, v23.8h, v2.h[7]   \n"
"fmla   v28.8h, v16.8h, v3.h[0]   \n"
"fmla   v29.8h, v17.8h, v3.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v30.8h, v18.8h, v3.h[2]   \n"
"fmla   v31.8h, v19.8h, v3.h[3]   \n"
"fmla   v28.8h, v20.8h, v3.h[4]   \n"
"fmla   v29.8h, v21.8h, v3.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v30.8h, v22.8h, v3.h[6]   \n"
"fmla   v31.8h, v23.8h, v3.h[7]   \n"
"fmla   v28.8h, v16.8h, v4.h[0]   \n"
"fmla   v29.8h, v17.8h, v4.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v30.8h, v18.8h, v4.h[2]   \n"
"fmla   v31.8h, v19.8h, v4.h[3]   \n"
"fmla   v28.8h, v20.8h, v4.h[4]   \n"
"fmla   v29.8h, v21.8h, v4.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v30.8h, v22.8h, v4.h[6]   \n"
"fmla   v31.8h, v23.8h, v4.h[7]   \n"
"fmla   v28.8h, v16.8h, v5.h[0]   \n"
"fmla   v29.8h, v17.8h, v5.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v30.8h, v18.8h, v5.h[2]   \n"
"fmla   v31.8h, v19.8h, v5.h[3]   \n"
"prfm pldl1keep, [%3, #384]       \n"
"ld1    {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22
"fmla   v28.8h, v20.8h, v5.h[4]   \n"
"fmla   v29.8h, v21.8h, v5.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v30.8h, v22.8h, v5.h[6]   \n"
"fmla   v31.8h, v23.8h, v5.h[7]   \n"
"fmla   v28.8h, v16.8h, v0.h[0]   \n"
"fmla   v29.8h, v17.8h, v0.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v30.8h, v18.8h, v0.h[2]   \n"
"fmla   v31.8h, v19.8h, v0.h[3]   \n"
"fmla   v28.8h, v20.8h, v0.h[4]   \n"
"fmla   v29.8h, v21.8h, v0.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v30.8h, v22.8h, v0.h[6]   \n"
"fmla   v31.8h, v23.8h, v0.h[7]   \n"
"fmla   v28.8h, v16.8h, v1.h[0]   \n"
"fmla   v29.8h, v17.8h, v1.h[1]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla   v30.8h, v18.8h, v1.h[2]   \n"
"fmla   v31.8h, v19.8h, v1.h[3]   \n"
"fmla   v28.8h, v20.8h, v1.h[4]   \n"
"fmla   v29.8h, v21.8h, v1.h[5]   \n"
"prfm pldl1keep, [%4, #512]       \n"
"ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla   v30.8h, v22.8h, v1.h[6]   \n"
"fmla   v31.8h, v23.8h, v1.h[7]   \n"
"fmla   v28.8h, v16.8h, v2.h[0]   \n"
"fmla   v29.8h, v17.8h, v2.h[1]   \n"
// Last weight load: no writeback (see the 4-pixel block above).
//                 "prfm pldl1keep, [%4, #512]       \n"
"ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla   v30.8h, v18.8h, v2.h[2]   \n"
"fmla   v31.8h, v19.8h, v2.h[3]   \n"
"fmla   v28.8h, v20.8h, v2.h[4]   \n"
"fmla   v29.8h, v21.8h, v2.h[5]   \n"
"add    %1, %1, #16               \n"
"fmla   v30.8h, v22.8h, v2.h[6]   \n"
"fmla   v31.8h, v23.8h, v2.h[7]   \n"
"add    %2, %2, #16               \n"
"fadd   v28.8h, v28.8h, v29.8h    \n"
"fadd   v30.8h, v30.8h, v31.8h    \n"
"add    %3, %3, #16               \n"
"fadd   v28.8h, v28.8h, v30.8h    \n"
"sub    %4, %4, #1088             \n" // rewind kptr by 17 * 64 = 1088 bytes, back to the start of this (p, q) weight block
"st1    {v28.8h}, [%0], #16       \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2),      // %3
"=r"(kptr)     // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
// The asm blocks advanced r0/r1/r2 by outw pixels (16 bytes each); skip
// the remaining 2 pixels of the 3-wide window overlap so the pointers
// land on the start of the next input row (input row width = outw + 2).
r0 += 16;
r1 += 16;
r2 += 16;
}
}
}
}
static void conv3x3s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = (w - 2 * outw + w) * 8;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
__fp16* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1], #64 \n" // r04 r05 r06 r07
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v6.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v6.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v6.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v5.h[0] \n"
"fmla v31.8h, v16.8h, v7.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v18.8h, v7.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v5.h[4] \n"
"fmla v31.8h, v20.8h, v7.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v22.8h, v7.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1] \n" // r08
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v16.8h, v6.h[0] \n"
"fmla v31.8h, v16.8h, v0.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v18.8h, v6.h[2] \n"
"fmla v31.8h, v18.8h, v0.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v4.h[3] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v20.8h, v6.h[4] \n"
"fmla v31.8h, v20.8h, v0.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v0.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v4.h[7] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2], #64 \n" // r14 r15 r16 r17
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v14.h[0] \n"
"fmla v28.8h, v17.8h, v8.h[1] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v14.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v8.h[2] \n"
"fmla v29.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v14.h[2] \n"
"fmla v28.8h, v19.8h, v8.h[3] \n"
"fmla v29.8h, v19.8h, v10.h[3] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v14.h[3] \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v14.h[4] \n"
"fmla v28.8h, v21.8h, v8.h[5] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v14.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v8.h[6] \n"
"fmla v29.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v14.h[6] \n"
"fmla v28.8h, v23.8h, v8.h[7] \n"
"fmla v29.8h, v23.8h, v10.h[7] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v14.h[7] \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v16.8h, v13.h[0] \n"
"fmla v31.8h, v16.8h, v15.h[0] \n"
"fmla v28.8h, v17.8h, v9.h[1] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v17.8h, v13.h[1] \n"
"fmla v31.8h, v17.8h, v15.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v9.h[2] \n"
"fmla v29.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v18.8h, v13.h[2] \n"
"fmla v31.8h, v18.8h, v15.h[2] \n"
"fmla v28.8h, v19.8h, v9.h[3] \n"
"fmla v29.8h, v19.8h, v11.h[3] \n"
"fmla v30.8h, v19.8h, v13.h[3] \n"
"fmla v31.8h, v19.8h, v15.h[3] \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v20.8h, v13.h[4] \n"
"fmla v31.8h, v20.8h, v15.h[4] \n"
"fmla v28.8h, v21.8h, v9.h[5] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v21.8h, v13.h[5] \n"
"fmla v31.8h, v21.8h, v15.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v9.h[6] \n"
"fmla v29.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v22.8h, v13.h[6] \n"
"fmla v31.8h, v22.8h, v15.h[6] \n"
"fmla v28.8h, v23.8h, v9.h[7] \n"
"fmla v29.8h, v23.8h, v11.h[7] \n"
"fmla v30.8h, v23.8h, v13.h[7] \n"
"fmla v31.8h, v23.8h, v15.h[7] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.8h}, [%2] \n" // r18
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v16.8h, v12.h[0] \n"
"fmla v30.8h, v16.8h, v14.h[0] \n"
"fmla v31.8h, v16.8h, v8.h[0] \n"
"fmla v28.8h, v17.8h, v10.h[1] \n"
"fmla v29.8h, v17.8h, v12.h[1] \n"
"fmla v30.8h, v17.8h, v14.h[1] \n"
"fmla v31.8h, v17.8h, v8.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v10.h[2] \n"
"fmla v29.8h, v18.8h, v12.h[2] \n"
"fmla v30.8h, v18.8h, v14.h[2] \n"
"fmla v31.8h, v18.8h, v8.h[2] \n"
"fmla v28.8h, v19.8h, v10.h[3] \n"
"fmla v29.8h, v19.8h, v12.h[3] \n"
"fmla v30.8h, v19.8h, v14.h[3] \n"
"fmla v31.8h, v19.8h, v8.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v20.8h, v12.h[4] \n"
"fmla v30.8h, v20.8h, v14.h[4] \n"
"fmla v31.8h, v20.8h, v8.h[4] \n"
"fmla v28.8h, v21.8h, v10.h[5] \n"
"fmla v29.8h, v21.8h, v12.h[5] \n"
"fmla v30.8h, v21.8h, v14.h[5] \n"
"fmla v31.8h, v21.8h, v8.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v10.h[6] \n"
"fmla v29.8h, v22.8h, v12.h[6] \n"
"fmla v30.8h, v22.8h, v14.h[6] \n"
"fmla v31.8h, v22.8h, v8.h[6] \n"
"fmla v28.8h, v23.8h, v10.h[7] \n"
"fmla v29.8h, v23.8h, v12.h[7] \n"
"fmla v30.8h, v23.8h, v14.h[7] \n"
"fmla v31.8h, v23.8h, v8.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r24 r25 r26 r27
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v6.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v6.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v6.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v5.h[0] \n"
"fmla v31.8h, v16.8h, v7.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v18.8h, v7.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v5.h[4] \n"
"fmla v31.8h, v20.8h, v7.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v22.8h, v7.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.8h}, [%3] \n" // r28
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v16.8h, v6.h[0] \n"
"fmla v31.8h, v16.8h, v0.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v18.8h, v6.h[2] \n"
"fmla v31.8h, v18.8h, v0.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v4.h[3] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v20.8h, v6.h[4] \n"
"fmla v31.8h, v20.8h, v0.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v0.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v4.h[7] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v30.8h, v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1] \n" // r04
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v0.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v0.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v0.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v0.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v6.h[0] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v6.h[2] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v6.h[4] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v6.h[6] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[0] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v5.h[2] \n"
"fmla v29.8h, v18.8h, v7.h[2] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v4.8h}, [%2] \n" // r14
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v20.8h, v7.h[4] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v5.h[6] \n"
"fmla v29.8h, v22.8h, v7.h[6] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"fmla v28.8h, v16.8h, v6.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v6.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v6.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v6.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.8h}, [%3] \n" // r24
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v0.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v0.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v0.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v0.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"fadd v29.8h, v29.8h, v31.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmul v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"add %1, %1, #32 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"add %2, %2, #32 \n"
"fadd v28.8h, v28.8h, v29.8h \n"
"fadd v30.8h, v30.8h, v31.8h \n"
"add %3, %3, #32 \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
}
|
matrixmultiply-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// Classic i-k-j matrix multiplication
#define N 100
#define M 100
#define K 100
double a[N][M], b[M][K], c[N][K];

/* Multiply a (N x M) by b (M x K), accumulating into c.
 * The i-k-j loop order keeps the innermost accesses to b and c contiguous.
 * Result rows are distributed across OpenMP threads; the inner indices are
 * thread-private, so concurrent iterations never touch the same c row. */
int mmm()
{
    int row, inner, col;
#pragma omp parallel for private(col, inner)
    for (row = 0; row < N; row++)
        for (inner = 0; inner < K; inner++)
            for (col = 0; col < M; col++)
                c[row][col] = c[row][col] + a[row][inner] * b[inner][col];
    return 0;
}
/* Entry point: run the multiplication benchmark once and propagate its
 * (always-zero) status code. */
int main()
{
    return mmm();
}
|
reconstruction.h | #pragma once
#if !__CUDACC__
#include <vector>
#include <algorithm>
#include "common/mathematica_graphics.h"
#if USE_FAST_TEXT_PARSER
#include "util/text_parser.h"
#endif
#endif
#include "2d/strip/response.h"
#include "2d/barrel/geometry_soa.h"
#include "2d/geometry/pixel_map.h"
#include "3d/geometry/point.h"
#include "3d/geometry/voxel_grid.h"
#include "3d/geometry/voxel.h"
#include "3d/geometry/voxel_map.h"
#if _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
namespace PET3D {
namespace Hybrid {
/// 3D hybrid PET reconstruction
template <class ScannerClass, class Kernel2DClass> class Reconstruction {
public:
using Scanner = ScannerClass;
using Kernel2D = Kernel2DClass;
using F = typename Scanner::F;
using S = typename Scanner::S;
using Response = typename Scanner::Response;
using LOR = PET2D::Barrel::LOR<S>;
using StripEvent = PET2D::Strip::Response<F>;
using Voxel = PET3D::Voxel<S>;
using Point2D = PET2D::Point<F>;
using Point = PET3D::Point<F>;
using Vector2D = PET2D::Vector<F>;
using Output = PET3D::VoxelMap<Voxel, F>;
using NaiveOutput = PET3D::VoxelMap<Voxel, int>;
using Grid = PET3D::VoxelGrid<F, S>;
using PixelGrid = typename Grid::PixelGrid;
using Pixel = typename PixelGrid::Pixel;
using Map2D = PET2D::PixelMap<Pixel, F>;
using Geometry = PET2D::Barrel::GeometrySOA<F, S>;
  /// A detector response translated into the frame of its LOR, together with
  /// the ranges of pixel infos and z-planes covered by the reconstruction
  /// kernel's ellipse bounding box for this event.
  struct FrameEvent {
    LOR lor;                  // line of response this event belongs to
    F up;                     // event position along the LOR (strip y axis);
                              // output of StripEvent::calculate_tan_y_z
    F right;                  // event position along the detector z axis
                              // (fed to plane() in translate_to_frame)
    F tan;                    // tangent of the event angle in the strip frame
    F sec;                    // secant of the event angle
    size_t pixel_info_begin;  // first index into geometry pixel infos
    size_t pixel_info_end;    // one-past-last index into geometry pixel infos
    S plane_begin;            // first z-plane inside the kernel bounding box
    S plane_end;              // one-past-last z-plane inside the bounding box
  };

  /// Per-voxel kernel contribution: voxel coordinates plus its weight.
  struct VoxelKernelInfo {
    S ix, iy, iz;
    F weight;
  };
#if !__CUDACC__
  /// Constructs the reconstruction for a given scanner, voxel grid and
  /// detector geometry.
  ///
  /// \param scanner            scanner description (provides sigma_z/sigma_dl
  ///                           for the 2D kernel)
  /// \param grid               target 3D voxel grid
  /// \param geometry           precomputed per-LOR pixel geometry (SOA)
  /// \param use_3d_sensitivity when the geometry carries a single plane only,
  ///                           allocate a half-depth 3D sensitivity map
  ///                           instead of a single-plane one
  Reconstruction(const Scanner& scanner,
                 const Grid& grid,
                 const Geometry& geometry,
                 bool use_3d_sensitivity = true)
      : scanner(scanner),
        grid(grid),
        geometry(geometry),
        // rho (the reconstructed density) starts filled with 1, presumably a
        // uniform initial image for the iterative scheme -- see operator()
        rho(grid.pixel_grid.n_columns,
            grid.pixel_grid.n_rows,
            grid.n_planes,
            1),
        // sensitivity depth: geometry half-planes when the geometry is
        // multi-plane, otherwise half the grid planes (3D) or one plane (2D)
        sensitivity(grid.pixel_grid.n_columns,
                    grid.pixel_grid.n_rows,
                    geometry.n_planes_half > 1
                        ? geometry.n_planes_half
                        : use_3d_sensitivity ? (grid.n_planes / 2) : 1),
        kernel_(scanner.sigma_z(), scanner.sigma_dl()),
        n_threads_(omp_get_max_threads()),
        n_events_per_thread_(n_threads_, 0) {}
  /// Transverse sigma of the per-LOR Gaussian weight, proportional to the
  /// LOR strip width (factor 0.3).
  F sigma_w(F width) const { return F(0.3) * width; }
  /// Translates a raw response into an approximate 3D emission point by
  /// interpolating along the LOR segment, using the time-difference distance
  /// dl to offset the point from the segment middle.
  /// NOTE(review): `iterpolate` is the (misspelled) Point API name declared
  /// elsewhere -- confirm before renaming.
  Point translate_to_point(const Response& response) {
    auto segment = geometry[response.lor].segment;
    // t = 1/2 - dl / (2 * length): t parametrizes the segment, dl shifts the
    // point away from its middle
    F t = F(0.5) - response.dl / (2 * segment->length);
    return Point(segment->start.x, segment->start.y, response.z_dn)
        .iterpolate(Point(segment->end.x, segment->end.y, response.z_up), t);
  }
FrameEvent translate_to_frame(const Response& response) {
FrameEvent event;
event.lor = response.lor;
const auto lor_index = event.lor.index();
const auto& segment = geometry.lor_line_segments[lor_index];
const auto R = segment.length / 2;
StripEvent strip_event(response.z_up, response.z_dn, response.dl);
strip_event.calculate_tan_y_z(R, event.tan, event.up, event.right);
F A, B, C;
F half_box_up, half_box_right;
kernel_.ellipse_bb(
event.tan, event.sec, A, B, C, half_box_up, half_box_right);
auto ev_z_left = event.right - half_box_right;
auto ev_z_right = event.right + half_box_right;
event.plane_begin = std::max((S)0, plane(ev_z_left));
event.plane_end = std::min((S)(plane(ev_z_right) + 1), grid.n_planes);
auto y_up = event.up + half_box_up;
auto y_dn = event.up - half_box_up;
auto t_up = (y_up + R) / (2 * R);
auto t_dn = (y_dn + R) / (2 * R);
const auto lor_info_begin = geometry.lor_pixel_info_begin[lor_index];
const auto lor_info_end = geometry.lor_pixel_info_end[lor_index];
const auto pixel_positions_begin =
&geometry.pixel_positions[lor_info_begin];
const auto pixel_positions_end = &geometry.pixel_positions[lor_info_end];
event.pixel_info_end =
std::upper_bound(pixel_positions_begin,
pixel_positions_end,
t_up,
[](const F a, const F b) -> bool { return a < b; }) -
geometry.pixel_positions + 1;
event.pixel_info_begin =
std::lower_bound(pixel_positions_begin,
pixel_positions_end,
t_dn,
[](const F a, const F b) -> bool { return a < b; }) -
geometry.pixel_positions;
return event;
}
  /// Maps a z coordinate to its plane index. Planes share the pixel pitch,
  /// i.e. voxels are assumed cubic.
  S plane(F z) { return S((z - grid.z_left) / grid.pixel_grid.pixel_size); }
bool bb_intersects_grid_with_positive_weight(const FrameEvent& event) {
if (event.plane_end <= 0 || event.plane_begin >= grid.n_planes)
return false;
for (size_t i = event.pixel_info_begin; i < event.pixel_info_end; i++) {
auto pixel = geometry.pixels[i];
if (grid.pixel_grid.contains(pixel) && geometry.pixel_weights[i] > 0)
return true;
}
return false;
}
#if USE_FAST_TEXT_PARSER
  /// Loads text-formatted responses from given file using the fast parser.
  /// Makes two passes: first counts lines to reserve storage, then parses
  /// `first second z_up z_dn dl` records, keeping only events whose kernel
  /// bounding box intersects the grid with positive weight.
  void fast_load_txt_events(const char* fn) {
    size_t n_lines = 0;
    // first just count lines and reserve space
    util::text_parser::read_lines(fn, [&](const char*) { ++n_lines; });
    events_.reserve(n_lines);
    // now read actual values
    util::text_parser::read_lines(
        fn,
        [&](const char* line) {
          util::text_parser parser(line);
          Response response;
          try {
            parser >> response.lor.first >> response.lor.second >>
                response.z_up >> response.z_dn >> response.dl;
          } catch (const char* ex) {
            // report the offending line, then re-throw to abort the load
            std::cerr << "error line: " << line << std::endl;
            throw(ex);
          }
          auto event = translate_to_frame(response);
          if (bb_intersects_grid_with_positive_weight(event))
            events_.push_back(event);
        });
  }
#endif
Reconstruction& operator<<(std::istream& in) {
for (;;) {
Response response(in);
if (!in)
break;
auto event = translate_to_frame(response);
if (bb_intersects_grid_with_positive_weight(event))
events_.push_back(event);
}
return *this;
}
Reconstruction& operator<<(util::ibstream& in) {
for (;;) {
Response response(in);
if (!in)
break;
auto event = translate_to_frame(response);
if (bb_intersects_grid_with_positive_weight(event))
events_.push_back(event);
}
return *this;
}
  /// Number of accepted (in-FOV) events loaded so far.
  /// NOTE(review): returns int while events_.size() is size_t -- narrows for
  /// very large event sets; callers use int indices, so kept as-is.
  int n_events() const { return events_.size(); }
  /// All accepted events.
  const std::vector<FrameEvent>& events() const { return events_; }
  /// i-th accepted event (returned by value).
  FrameEvent frame_event(int i) const { return events_[i]; }
void calculate_weight() {
const auto& pixel_grid = grid.pixel_grid;
sensitivity.assign(0);
for (size_t lor_index = 0; lor_index < geometry.n_lors; ++lor_index) {
const auto& segment = geometry.lor_line_segments[lor_index];
const auto width = geometry.lor_widths[lor_index];
const auto gauss_norm_w = 1 / (sigma_w(width) * std::sqrt(2 * M_PI));
const auto inv_sigma2_w = 1 / (2 * sigma_w(width) * sigma_w(width));
for (size_t pixel_info = geometry.lor_pixel_info_begin[lor_index];
pixel_info < geometry.lor_pixel_info_end[lor_index];
++pixel_info) {
auto pixel = geometry.pixels[pixel_info];
auto center = pixel_grid.center_at(pixel);
auto distance = segment.distance_from(center);
auto kernel_z =
gauss_norm_w * std::exp(-distance * distance * inv_sigma2_w);
geometry.pixel_weights[pixel_info] = kernel_z;
}
}
}
  /// Accumulates per-voxel sensitivity by summing pixel weights over every
  /// pixel info, for each of the geometry's half-planes.
  /// NOTE(review): assumes pixel_weights is laid out plane-major as
  /// [plane * n_pixel_infos + pixel_info] -- same indexing as used by
  /// normalize_geometry_weights; confirm against GeometrySOA.
  void calculate_sensitivity() {
    sensitivity.assign(0);
    for (size_t pixel_info = 0; pixel_info < geometry.n_pixel_infos;
         ++pixel_info) {
      const auto pixel = geometry.pixels[pixel_info];
      for (size_t plane = 0; plane < geometry.n_planes_half; ++plane) {
        Voxel voxel(pixel.x, pixel.y, plane);
        const auto voxel_index = pixel_info + geometry.n_pixel_infos * plane;
        sensitivity[voxel] += geometry.pixel_weights[voxel_index];
      }
    }
  }
  /// Divides every pixel weight by the corresponding voxel's accumulated
  /// sensitivity (calculate_sensitivity must have been called first).
  /// Parallelized over pixel infos; MSVC's OpenMP requires a signed loop
  /// index, hence the preprocessor split in the loop header.
  void normalize_geometry_weights() {
#if _OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
    for (
#if !_MSC_VER
        size_t pixel_info = 0; pixel_info < geometry.n_pixel_infos;
#else
        ptrdiff_t pixel_info = 0;
        pixel_info < (ptrdiff_t)geometry.n_pixel_infos;
#endif
        ++pixel_info) {
      const auto pixel = geometry.pixels[pixel_info];
      for (size_t plane = 0; plane < geometry.n_planes_half; ++plane) {
        Voxel voxel(pixel.x, pixel.y, plane);
        const auto voxel_index = pixel_info + geometry.n_pixel_infos * plane;
        geometry.pixel_weights[voxel_index] /= sensitivity[voxel];
      }
    }
  }
  /// Disables sensitivity correction by filling the map with ones.
  void set_sensitivity_to_one() { sensitivity.assign(1); }
int operator()() {
bool multiplane = geometry.n_planes_half > 1;
bool use_3d_sensitivity = sensitivity.depth > 1;
if (thread_rhos_.size() == 0) {
for (int i = 0; i < n_threads_; ++i) {
thread_rhos_.emplace_back(
grid.pixel_grid.n_columns, grid.pixel_grid.n_rows, grid.n_planes);
thread_kernel_caches_.emplace_back(
grid.pixel_grid.n_columns, grid.pixel_grid.n_rows, grid.n_planes);
}
}
size_t used_pixels = 0, used_voxels = 0, used_events = 0;
const auto& pixel_grid = grid.pixel_grid;
for (auto& thread_rho : thread_rhos_) {
thread_rho.assign(0);
}
for (auto& thread_kernel_cache : thread_kernel_caches_) {
thread_kernel_cache.assign(0);
}
for (auto& n_events : n_events_per_thread_) {
n_events = 0;
}
#if _OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
// --- event loop ----------------------------------------------------------
for (int i = 0; i < n_events(); ++i) {
int thread = omp_get_thread_num();
n_events_per_thread_[thread]++;
const auto event = frame_event(i);
const auto lor = event.lor;
const auto lor_index = lor.index();
const auto& segment = geometry.lor_line_segments[lor_index];
const auto R = segment.length / 2;
F denominator = 0;
// -- voxel loop - denominator -------------------------------------------
for (auto info_index = event.pixel_info_begin;
info_index < event.pixel_info_end;
++info_index) {
used_pixels++; // statistics
const auto pixel = geometry.pixels[info_index];
const auto pixel_weight = geometry.pixel_weights[info_index];
const auto pixel_index = pixel_grid.index(pixel);
const auto center = pixel_grid.center_at(pixel);
const auto up = segment.projection_relative_middle(center);
for (int iz = event.plane_begin; iz < event.plane_end; ++iz) {
used_voxels++; // statistics
const Voxel voxel(pixel.x, pixel.y, iz);
const auto z = grid.center_z_at(voxel);
const auto voxel_index = grid.index(voxel);
const auto kernel2d =
kernel_.normalized(Point2D(event.right, event.up),
event.tan,
event.sec,
R,
scanner.length,
Point2D(z, up));
// FIXME: In some cases we may be at the detector boundary, eg. up
// equal or more than radius (distance between scintillators), this
// gives negative value for 2d analytic kernel.
if (kernel2d <= 0) {
thread_kernel_caches_[thread][voxel_index] = 0;
continue;
}
if (multiplane) {
const auto abs_plane =
compat::abs(iz - (int)geometry.n_planes_half);
const auto kernel_t =
geometry.pixel_weights[abs_plane * geometry.n_pixel_infos +
info_index];
const auto weight = kernel2d * kernel_t * rho[voxel_index];
const auto abs_voxel = Voxel(pixel.x, pixel.y, abs_plane);
denominator += weight * sensitivity[abs_voxel];
thread_kernel_caches_[thread][voxel_index] = weight;
} else if (use_3d_sensitivity) {
const auto abs_plane = compat::abs(iz - (int)sensitivity.depth);
const auto kernel_t = pixel_weight;
const auto weight = kernel2d * kernel_t * rho[voxel_index];
const auto abs_voxel = Voxel(pixel.x, pixel.y, abs_plane);
denominator += weight * sensitivity[abs_voxel];
thread_kernel_caches_[thread][voxel_index] = weight;
} else {
const auto kernel_t = pixel_weight;
const auto weight = kernel2d * kernel_t * rho[voxel_index];
denominator += weight * sensitivity[pixel_index];
thread_kernel_caches_[thread][voxel_index] = weight;
}
}
} // voxel loop - denominator
F inv_denominator;
if (denominator > 0) {
inv_denominator = 1 / denominator;
} else {
#if THROW_ON_ZERO_DENOMINATOR
// NOTE: Even we filter events on read whose BB are out of FOV, it can
// happen that some pixels are in FOV partially, but on the radius, so
// 2D analytic kernel gives zero or negative value.
// Therefore this is only expected case it can happen, any other case
// means a bug in the code.
std::cerr << std::endl; // keeps the progress
std::cerr << "non-positive denominator == < 0" << std::endl;
std::cerr << " event = " << i << std::endl;
std::cerr << " denominator = " << denominator << std::endl;
std::cerr << " planes = " << event.plane_begin << ":"
<< event.plane_end << std::endl;
std::cerr << " emission = " << point(event) << std::endl;
std::cerr << " lor = " << event.lor.first << " "
<< event.lor.second << std::endl;
std::cerr << "pixels:" << std::endl;
std::cerr << " (" << geometry.pixels[event.pixel_info_begin].x << ","
<< geometry.pixels[event.pixel_info_begin].y << ") to ("
<< geometry.pixels[event.pixel_info_end].x << ","
<< geometry.pixels[event.pixel_info_end].y
<< "):" << std::endl;
for (auto info_index = event.pixel_info_begin;
info_index < event.pixel_info_end;
++info_index) {
const auto pixel = geometry.pixels[info_index];
const auto pixel_weight = geometry.pixel_weights[info_index];
const auto center = pixel_grid.center_at(pixel);
std::cerr << " (" << pixel.x << "," << pixel.y << ") "
<< pixel_weight << " " << center << "\n";
}
throw("denominator == 0 !");
#else
continue;
#endif
}
// -- voxel loop ---------------------------------------------------------
for (auto info_index = event.pixel_info_begin;
info_index < event.pixel_info_end;
++info_index) {
const auto pixel = geometry.pixels[info_index];
for (auto iz = event.plane_begin; iz < event.plane_end; ++iz) {
const Voxel voxel(pixel.x, pixel.y, iz);
const auto voxel_index = grid.index(voxel);
thread_rhos_[thread][voxel_index] +=
thread_kernel_caches_[thread][voxel_index] * inv_denominator;
}
} // voxel loop
} // event loop
rho.assign(0);
for (int thread = 0; thread < n_threads_; ++thread) {
for (int i = 0; i < grid.n_voxels; ++i) {
rho[i] += thread_rhos_[thread][i];
}
used_events += n_events_per_thread_[thread];
}
// save statistics
statistics_.used_pixels = used_pixels;
statistics_.used_voxels = used_voxels;
statistics_.used_events = used_events;
return used_events;
}
Point point(const FrameEvent& event) {
const auto& segment = geometry.lor_line_segments[event.lor.index()];
const auto point2d = segment.mid_point + segment.direction * event.up;
return Point(point2d.x, point2d.y, event.right);
}
NaiveOutput naive() {
NaiveOutput image(
grid.pixel_grid.n_columns, grid.pixel_grid.n_rows, grid.n_planes, 0);
for (const auto& event : events_) {
auto p = point(event);
const auto voxel = grid.voxel_at(p);
if (grid.contains(voxel)) {
++image[voxel];
}
}
return image;
}
void graph_frame_event(Common::MathematicaGraphics<F>& graphics,
int event_index) {
auto event = events_[event_index];
auto lor = event.lor;
graphics.add(scanner.barrel, lor);
graphics.add(geometry[lor].segment);
for (auto info_index = event.pixel_info_begin;
info_index < event.pixel_info_end;
++info_index) {
const auto& pixel_info = geometry[lor].pixel_infos[info_index];
graphics.add_pixel(grid.pixel_grid, pixel_info.pixel);
}
}
  /// Event statistics
  ///
  /// Aggregate per-event counts of contributing pixels, planes and voxels,
  /// filled in by event_statistics().
  struct EventStatistics {
    size_t min_pixels, max_pixels;  ///< min/max of 2D barrel pixel number
    size_t min_planes, max_planes;  ///< min/max of 2D strip plane number
    size_t min_voxels, max_voxels;  ///< min/max of voxel number
    double avg_pixels, avg_planes, avg_voxels;  ///< averages
  };
/// Calculates event statistics
void event_statistics(EventStatistics& st) {
st.min_pixels = st.min_planes = st.min_voxels = grid.n_voxels;
st.max_pixels = st.max_planes = st.max_voxels = 0;
size_t total_pixels = 0, total_planes = 0, total_voxels = 0;
for (const auto& event : events_) {
auto pixels = event.pixel_info_end - event.pixel_info_begin;
size_t planes = event.plane_end - event.plane_begin;
auto voxels = pixels * planes;
total_pixels += pixels;
total_planes += planes;
total_voxels += voxels;
if (pixels < st.min_pixels)
st.min_pixels = pixels;
if (planes < st.min_planes)
st.min_planes = planes;
if (voxels < st.min_voxels)
st.min_voxels = voxels;
if (pixels > st.max_pixels)
st.max_pixels = pixels;
if (planes > st.max_planes)
st.max_planes = planes;
if (voxels > st.max_voxels)
st.max_voxels = voxels;
}
st.avg_pixels = (double)total_pixels / n_events();
st.avg_planes = (double)total_planes / n_events();
st.avg_voxels = (double)total_voxels / n_events();
}
  /// Reconstruction statistics
  ///
  /// Counters accumulated during a single reconstruction pass and exposed
  /// via statistics().
  struct Statistics {
    size_t used_pixels;  ///< number of pixels used for reconstruction
    size_t used_voxels;  ///< number of voxels used for reconstruction
    size_t used_events;  ///< number of events used for reconstruction
  };
  /// Return reconstruction statistics
  ///
  /// Values reflect the most recent reconstruction pass (see Statistics).
  const Statistics& statistics() const { return statistics_; }
 public:
  const Scanner& scanner;    ///< scanner description used for reconstruction
  const Grid grid;           ///< 3D voxel grid (2D pixel grid x planes)
  const Geometry& geometry;  ///< per-LOR geometry: pixels, weights, segments
  Output rho;                ///< current image, rebuilt from per-thread partials each pass
  Output sensitivity;        ///< sensitivity map, indexed by voxel or pixel

 private:
  std::vector<FrameEvent> events_;            ///< events to reconstruct
  Kernel2D kernel_;                           ///< 2D analytic reconstruction kernel
  Statistics statistics_;                     ///< statistics of last reconstruction pass
  int n_threads_;                             ///< number of worker threads
  std::vector<Output> thread_rhos_;           ///< per-thread rho partial sums
  std::vector<Output> thread_kernel_caches_;  ///< per-thread per-voxel kernel weights
  std::vector<VoxelKernelInfo> voxel_cache_;  // presumably a voxel-kernel cache; unused in visible code -- verify
  std::vector<int> n_events_per_thread_;      ///< per-thread used-event counters
#endif // !__CUDACC__
};
} // Hybrid
} // PET3D
|
reduce3.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* reduce3.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef REDUCE3_H_
#define REDUCE3_H_
#define EXTRA_PARAMS_LENGTH 10
#include <templatemath.h>
#include <helper_cuda.h>
#include <helpers/sharedmem.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pairwise_util.h>
#include <dll.h>
#include <helpers/shape.h>
#include <ops/ops.h>
#include <op_boilerplate.h>
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif
#ifndef _OPENMP
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif
#include "legacy_ops.h"
namespace functions {
namespace reduce3 {
/**
* Reduce involving
* 2 arrays
*/
template<typename T>
class Reduce3 {
public:
#ifdef __CUDACC__
            /**
             * Pairwise op combining one element from each input array;
             * "atomic" variant used from device-side reductions.
             * Implemented by concrete reduce3 ops.
             */
            virtual __device__
            inline T opAtomic(T d1, T d2, T *extraParamsRef) = 0;
#endif
#ifdef __CUDACC__
            /**
             * Aggregate shared memory: reduces the per-thread partials held in
             * *sPartialsRef down to sPartials[0] using OpType::update.
             * Must be called by all threads of the block (contains barriers).
             * @param sPartialsRef pointer to shared-memory partials array
             * @param tid thread index within the block
             * @param numItems number of valid partials (<= blockDim.x)
             * @param extraParams extra op parameters forwarded to update()
             */
            template<typename OpType>
            static __inline__ __device__ void aggregatePartials(T **sPartialsRef, Nd4jLong tid, Nd4jLong numItems, T *extraParamsRef) {
                // start the shared memory loop on the next power of 2 less
                // than the block size. If block size is not a power of 2,
                // accumulate the intermediate sums in the remainder range.
                T *sPartials = *sPartialsRef;
                Nd4jLong floorPow2 = numItems;
                if (floorPow2 & (floorPow2 - 1)) {
                    // clear low bits until a single set bit (power of two) remains
                    while (floorPow2 & (floorPow2 - 1)) {
                        floorPow2 &= floorPow2 - 1;
                    }
                    // fold the tail [floorPow2, numItems) onto the head
                    if (tid >= floorPow2) {
                        sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParamsRef);
                    }
                    __syncthreads();
                }
                // classic tree reduction over the power-of-two range
                for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
                    if (tid < activeThreads) {
                        sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParamsRef);
                    }
                    __syncthreads();
                }
            }
/**
Perform a reduction
@param n the number of elements
@param xOffset the starting offset
@param dx the data to perform the reduction on
@param incx the increment on which to perform the reduction
@param extraParams extra parameters used for calculations
@param result where to store the result of the reduction
*/
virtual __inline__ __device__ void transformNoElementWiseStride(
T *dx,
Nd4jLong *xShapeInfo,
T *dy,
Nd4jLong *yShapeInfo,
T *extraParams,
T *result,
Nd4jLong *resultShapeInfo,
int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) {
Nd4jLong n = shape::length(xShapeInfo);
int rank = shape::rank(xShapeInfo);
T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer();
T startingVal = this->startingValue(dx);
// FIXME: this ugly fast fix.
__shared__ T extraZ[2];
if (threadIdx.x == 0) {
extraZ[0] = (T) 0.0;
extraZ[1] = (T) 0.0;
}
sPartials[threadIdx.x] = startingVal;
__syncthreads();
Nd4jLong idx[MAX_RANK];
for(Nd4jLong i = blockIdx.x * gridDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) {
shape::ind2subC(rank,shape::shapeOf(xShapeInfo),i, idx);
auto offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),idx,rank);
auto yOffset = shape::getOffset(0,shape::shapeOf(yShapeInfo),shape::stride(yShapeInfo),idx,rank);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], this->opAtomic(dx[offset], dy[yOffset], extraZ), extraZ);
}
T **sPartialsRef = (T **) &sPartials;
aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, n), extraZ);
/**
* Look at something that uses the extra params
* and aggregates the extra values propelry.
*This will be used in summary stats too.
*/
// write result for this block to global mem
if (threadIdx.x == 0) {
if (postProcessOrNot) {
result[blockIdx.x] = postProcess(sPartials[0], n, extraZ);
}
else {
result[blockIdx.x] = sPartials[0];
}
}
}
/**
*
*/
            /**
             * Device-side reduce3 to a single scalar over two whole arrays.
             * Fast path: matching orders with positive element-wise strides;
             * slow path: per-element coordinate resolution via ind2subC.
             * With more than one block, partials are published to
             * reductionBuffer and the last block (atomic ticket at tc[16384])
             * performs the final aggregation into result[0].
             */
            template<typename OpType>
            static inline __device__ void execScalarCuda(
                    T *dx,
                    Nd4jLong *xShapeInfo,
                    T *dy,
                    Nd4jLong *yShapeInfo,
                    T *extraParams,
                    T *result,
                    Nd4jLong *resultShapeInfo, int *allocationPointer, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) {
                //        SharedMemory <T> val;
                T *sPartials = (T *) manager->getSharedReductionBuffer(); // val.getPointer();
                // FIXME: this ugly fast fix.
                // extraZ[0..1] accumulate op-specific extra state (e.g. norms
                // for cosine similarity); extraZ[2] carries extraParams[0]
                // when provided (used by EqualsWithEps).
                __shared__ T extraZ[3];
                if (threadIdx.x == 0) {
                    extraZ[0] = (T) 0.0f;
                    extraZ[1] = (T) 0.0f;
                    if (extraParams != NULL) {
                        extraZ[2] = extraParams[0];
                    } else extraZ[2] = (T) 0.0f;
                }
                __syncthreads();
                T startingVal = OpType::startingValue(dx);
                Nd4jLong length = shape::length(xShapeInfo);
                int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
                int yElementWiseStride = shape::elementWiseStride(yShapeInfo);
                int tid = blockIdx.x * blockDim.x + threadIdx.x;
                char xOrder = shape::order(xShapeInfo);
                char yOrder = shape::order(yShapeInfo);
                // fast path: both arrays iterable linearly in the same order
                if(xOrder == yOrder && (xElementWiseStride > 0 && yElementWiseStride > 0) && shape::strideDescendingCAscendingF(xShapeInfo) && shape::strideDescendingCAscendingF(yShapeInfo)) {
                    if (xElementWiseStride == 1 && yElementWiseStride == 1) {
                        for(Nd4jLong i = tid; i < length; i+= gridDim.x * blockDim.x) {
                            startingVal = OpType::update(startingVal, OpType::opAtomic(dx[i], dy[i], extraZ), extraZ);
                        }
                    }
                    else {
                        for(Nd4jLong i = tid; i < length; i+= gridDim.x * blockDim.x) {
                            startingVal = OpType::update(startingVal, OpType::opAtomic(dx[i * xElementWiseStride], dy[i * yElementWiseStride], extraZ), extraZ);
                        }
                    }
                    sPartials[threadIdx.x] = startingVal;
                } else {
                    // slow path: resolve each logical index to an offset per array
                    __shared__ Nd4jLong *xShape;
                    __shared__ Nd4jLong *yShape;
                    __shared__ Nd4jLong *xStride;
                    __shared__ Nd4jLong *yStride;
                    __shared__ int rank;
                    if (threadIdx.x == 0) {
                        xShape = shape::shapeOf(xShapeInfo);
                        yShape = shape::shapeOf(yShapeInfo);
                        xStride = shape::stride(xShapeInfo);
                        yStride = shape::stride(yShapeInfo);
                        rank = shape::rank(xShapeInfo);
                    }
                    __syncthreads();
                    T startingVal = OpType::startingValue(dx);
                    T *sPartials = (T *) manager->getSharedReductionBuffer();
                    Nd4jLong xCoords[MAX_RANK];
                    Nd4jLong yCoords[MAX_RANK];
                    sPartials[threadIdx.x] = startingVal;
                    for(Nd4jLong i = tid ;i < length; i += gridDim.x * blockDim.x) {
                        shape::ind2subC(rank,xShape,i,xCoords);
                        shape::ind2subC(rank,yShape,i,yCoords);
                        auto offset = shape::getOffset(0, xShape, xStride, xCoords,rank);
                        auto yOffset = shape::getOffset(0,yShape, yStride, yCoords,rank);
                        sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::opAtomic(dx[offset], dy[yOffset], extraZ), extraZ);
                    }
                }
                __syncthreads();
                T **sPartialsRef = (T **) &sPartials;
                aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, length), extraZ);
                __syncthreads();
                if (gridDim.x > 1) {
                    // multi-block: publish per-block partials, last block reduces
                    unsigned int *tc = (unsigned int *)reductionBuffer;
                    __shared__ bool amLast;
                    int rank = shape::rank(xShapeInfo);
                    tid = threadIdx.x;
                    T *extraBuffer = (T *) allocationPointer;
                    if (threadIdx.x == 0) {
                        reductionBuffer[blockIdx.x] = sPartials[0];
                        extraBuffer[blockIdx.x] = extraZ[0];
                        extraBuffer[gridDim.x + blockIdx.x] = extraZ[1];
                    }
                    __threadfence();
                    __syncthreads();
                    if (threadIdx.x == 0) {
                        // ticket counter lives at a fixed slot in reductionBuffer
                        unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
                        amLast = (ticket == gridDim.x - 1);
                    }
                    sPartials[tid] = startingVal;
                    __syncthreads();
                    if (amLast) {
                        tc[16384] = 0;
                        sPartials[threadIdx.x] = OpType::startingValue(dx);
                        // TODO: later probably replace this. Right now we need extraZ sync for CosineSimilarity ONLY
                        if (tid == 0 && extraZ[0] != (T) 0.0 && extraZ[1] != (T) 0.0) {
                            extraZ[0] = 0.0;
                            extraZ[1] = 0.0;
                            for (int i = 0; i < gridDim.x; i++) {
                                extraZ[0] += extraBuffer[i];
                                extraZ[1] += extraBuffer[gridDim.x + i];
                            }
                        }
                        for (Nd4jLong i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
                            sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraZ);
                        }
                        __syncthreads();
                        aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraZ);
                        __syncthreads();
                        if (threadIdx.x == 0) {
                            result[0] = OpType::postProcess(sPartials[0], length, extraZ);
                        }
                    }
                } else {
                    // single block: thread 0 writes the final result directly
                    if (tid == 0) {
                        unsigned int *tc = (unsigned *)reductionBuffer;
                        tc[16384] = 0;
                        result[0] = OpType::postProcess(sPartials[0], length, extraZ);
                    }
                }
            }
            /**
             * All-pairs reduce3: computes the reduction for every (x TAD,
             * y TAD) pair along the given dimensions, writing results into
             * result[xTad * yTads + yTad]. The current x TAD is staged into
             * shared memory (tempX) tile by tile (maxBlock elements per tile).
             */
            template<typename OpType>
            __device__
            static inline void transformAll(
                    T *dx,
                    Nd4jLong *xShapeInfo,
                    T *dy,
                    Nd4jLong *yShapeInfo,
                    T *extraParams,
                    T *result,
                    Nd4jLong *resultShapeInfo,
                    int *dimension,
                    int dimensionLength,
                    int postProcessOrNot,
                    int *allocationPointer,
                    UnifiedSharedMemory *manager,
                    Nd4jLong *xTadShapeInfo,
                    Nd4jLong *xOffsets,
                    Nd4jLong *yTadShapeInfo,
                    Nd4jLong *yOffsets) {
                // initialize partials first
                T *sPartials = (T *) manager->getSharedReductionBuffer();
                T startingVal = OpType::startingValue(dx);
                sPartials[threadIdx.x] = startingVal;
                // staging area for the current x TAD tile, right after partials
                T *tempX = sPartials + blockDim.x;
                const int maxBlock = blockDim.x;
                __shared__ T extraZ[OpType::extraParamsLen > 0 ? OpType::extraParamsLen : 1];
                __shared__ int xTadLength;
                __shared__ int yTadLength;
                __shared__ int xTads;
                __shared__ int yTads;
                __shared__ Nd4jLong *xShape;
                __shared__ Nd4jLong *xStride;
                __shared__ int xRank;
                __shared__ Nd4jLong *yShape;
                __shared__ Nd4jLong *yStride;
                __shared__ int yRank;
                //reading initial data
                if (threadIdx.x == 0) {
                    xTadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
                    yTadLength = shape::tadLength(yShapeInfo, dimension, dimensionLength);
                    xTads = shape::length(xShapeInfo) / xTadLength;
                    yTads = shape::length(yShapeInfo) / yTadLength;
                    xShape = shape::shapeOf(xTadShapeInfo);
                    xStride = shape::stride(xTadShapeInfo);
                    xRank = shape::rank(xTadShapeInfo);
                    yShape = shape::shapeOf(yTadShapeInfo);
                    yStride = shape::stride(yTadShapeInfo);
                    yRank = shape::rank(yTadShapeInfo);
                }
                __syncthreads();
                Nd4jLong xCoord[MAX_RANK];
                Nd4jLong yCoord[MAX_RANK];
                // number of maxBlock-sized tiles covering one x TAD
                int limit = xTadLength / maxBlock;
                if (xTadLength % maxBlock > 0)
                    limit++;
                // NOTE(review): block loop strides by blockDim.x * gridDim.x,
                // not gridDim.x, so some x TADs may be skipped when
                // blockDim.x > 1 -- confirm against upstream intent.
                for (int r = blockIdx.x; r < xTads; r += blockDim.x * gridDim.x) {
                    T *x = dx + xOffsets[r];
                    // stage first tile of this x TAD into shared memory
                    if (threadIdx.x < xTadLength && threadIdx.x < maxBlock) {
                        if (shape::order(xTadShapeInfo) == 'c') {
                            shape::ind2subC(xRank, xShape, threadIdx.x, xCoord);
                        } else {
                            shape::ind2sub(xRank, xShape, threadIdx.x, xCoord);
                        }
                        auto xO = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                        tempX[threadIdx.x] = x[xO];
                    }
                    for (int g = 0; g < yTads; g++) {
                        T *y = dy + yOffsets[g];
                        int ri = (r * yTads) + g;
                        sPartials[threadIdx.x] = startingVal;
                        if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) {
                            extraZ[threadIdx.x] = (T) startingVal;
                        }
                        __syncthreads();
                        // we might have data too large for single cache block, rendering cache useless though :(
                        for (int t = 0; t < limit; t++) {
                            // we reset tempX IF we have >1 tiles
                            if (t >= 1 || (limit > 1 && g > 0))
                                if (threadIdx.x + (t * maxBlock) < xTadLength) {
                                    if (shape::order(xTadShapeInfo) == 'c') {
                                        shape::ind2subC(xRank, xShape, threadIdx.x + (t * maxBlock), xCoord);
                                    } else {
                                        shape::ind2sub(xRank, xShape, threadIdx.x + (t * maxBlock), xCoord);
                                    }
                                    Nd4jLong xO = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                                    tempX[threadIdx.x] = x[xO];
                                    //                                    tempX[threadIdx.x] = x[threadIdx.x + (t * maxBlock)];
                                }
                            // NOTE(review): inner stride blockDim.x * gridDim.x
                            // looks inconsistent with the per-tile bound -- verify.
                            for (int f = threadIdx.x + (t * maxBlock); f < xTadLength && f < threadIdx.x + ((t + 1) * maxBlock); f += blockDim.x * gridDim.x) {
                                if (shape::order(yTadShapeInfo) == 'c') {
                                    shape::ind2subC(yRank, yShape, f, yCoord);
                                } else {
                                    shape::ind2sub(yRank, yShape, f, yCoord);
                                }
                                Nd4jLong yO = shape::getOffset(0, yShape, yStride, yCoord, yRank);
                                sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::opAtomic(tempX[threadIdx.x], y[yO], extraZ), extraZ);
                            }
                            // we MUST step through this block altogether
                            __syncthreads();
                        }
                        T **sPartialsRef = (T **) &sPartials;
                        aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, xTadLength), extraZ);
                        __syncthreads();
                        if (threadIdx.x == 0) {
                            result[ri] = OpType::postProcess(sPartials[threadIdx.x],xTadLength, extraZ);
                        }
                        __syncthreads();
                    }
                }
            }
/**
Perform a reduction
@param n the number of elements
@param xOffset the starting offset
@param dx the data to perform the reduction on
@param incx the increment on which to perform the reduction
@param extraParams extra parameters used for calculations
@param result where to store the result of the reduction
*/
template<typename OpType>
__device__
static inline void transform(
T *dx,
Nd4jLong *xShapeInfo,
T *dy,
Nd4jLong *yShapeInfo,
T *extraParams,
T *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationPointer,
UnifiedSharedMemory *manager,
Nd4jLong *tadOnlyShapeInfo,
Nd4jLong *tadOffsets,
Nd4jLong *yTadOnlyShapeInfo,
Nd4jLong *yTadOffsets) {
/**
* Gpu information for the problem
*/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int resultScalar;
__shared__ int xElementWiseStride;
__shared__ int yElementWiseStride;
//shared memory space for storing intermediate results
//SharedMemory <T> val;
T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer();
T init = OpType::startingValue(dx);
sPartials[threadIdx.x] = init;
__shared__ T extraZ[OpType::extraParamsLen > 0 ? OpType::extraParamsLen : 1];
//length for the tad
__shared__ Nd4jLong resultLength;
__shared__ int tadLength;
__shared__ int yLength;
__shared__ int tadElementWiseStride;
__shared__ int yTadElementWiseStride;
T startingVal = OpType::startingValue(dx);
T reduction = OpType::startingValue(dx);
if (threadIdx.x == 0) {
if (resultShapeInfo != nullptr)
resultLength = shape::length(resultShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
auto xStride = shape::stride(xShapeInfo);
char xOrder = shape::order(xShapeInfo);
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadElementWiseStride = shape::elementWiseStride(tadOnlyShapeInfo);
yLength = shape::length(yShapeInfo);
if (yTadOnlyShapeInfo != nullptr)
yTadElementWiseStride = shape::elementWiseStride(yTadOnlyShapeInfo);
}
__syncthreads();
// code branch for TAD vs full array
if (tadLength == yLength) {
Nd4jLong xCoord[MAX_RANK];
Nd4jLong yCoord[MAX_RANK];
auto yShape = shape::shapeOf(yShapeInfo);
auto yStride = shape::stride(yShapeInfo);
auto xShape = shape::shapeOf(tadOnlyShapeInfo);
auto xStride = shape::stride(tadOnlyShapeInfo);
int yRank = shape::rank(yShapeInfo);
int xRank = shape::rank(tadOnlyShapeInfo);
for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) {
int xOffsetForTad = tadOffsets[i];
if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) {
extraZ[threadIdx.x] = (T) startingVal;
}
__syncthreads();
for(int j = threadIdx.x; j < tadLength; j += blockDim.x) {
shape::ind2subC(xRank,xShape, j, xCoord);
shape::ind2subC(yRank,yShape, j, yCoord);
Nd4jLong xOffset = shape::getOffset(xOffsetForTad, xShape, xStride, xCoord, xRank);
Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank);
sPartials[threadIdx.x] = j < blockDim.x ? OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ) : OpType::update(sPartials[threadIdx.x], OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ), extraZ);
}
__syncthreads();
T **sPartialsRef = (T **) &sPartials;
aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraZ);
__syncthreads();
if (threadIdx.x == 0)
result[i] = OpType::postProcess(sPartials[threadIdx.x],tadLength, extraZ);
__syncthreads();
}
} else if (!resultScalar) {
if(tadElementWiseStride >= 1 && yTadElementWiseStride) {
for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) {
int xOffsetForTad = tadOffsets[i];
int yOffsetForTad = yTadOffsets[i];
if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) {
extraZ[threadIdx.x] = (T) startingVal;
}
__syncthreads();
if (threadIdx.x < tadLength)
sPartials[threadIdx.x] = OpType::op(dx[xOffsetForTad + tadElementWiseStride * threadIdx.x],dy[yOffsetForTad + yTadElementWiseStride * threadIdx.x], extraZ);
for(int j = threadIdx.x + blockDim.x; j < tadLength; j += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffsetForTad + tadElementWiseStride * j],dy[yOffsetForTad + yTadElementWiseStride * j], extraZ), extraZ);
}
__syncthreads();
T **sPartialsRef = (T **) &sPartials;
aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraZ);
__syncthreads();
if (threadIdx.x == 0)
result[i] = OpType::postProcess(sPartials[threadIdx.x],tadLength, extraZ);
__syncthreads();
}
}
else {
/*
// DO NOT REMOVE THIS COMMENTED BLOCK PLEASE
for (int r = blockIdx.x; r < tad->numTads; r += gridDim.x) {
if (threadIdx.x == 0)
tad->createOffsetForBlock(r);
__syncthreads();
int tadOffsetForBlock = tad->tadOffsetForBlock;
T *xVal = dx + tadOffsetForBlock;
sPartials[threadIdx.x] = this->startingValue(xVal);
for(int i = threadIdx.x; i < tad->tadLength; i+= blockDim.x) {
int xOffsetForTad = shape::tadOffset(i, xShapeInfo, dimension, dimensionLength, nullptr);
int yOffsetForTad = shape::tadOffset(i, yShapeInfo, dimension, dimensionLength, nullptr);
sPartials[threadIdx.x] = this->update(sPartials[threadIdx.x],dx[tadOffsetForBlock + i * tad->tadElementWiseStride], extraParams);
}
__syncthreads();
// aggregate. do NOT reduce for elements > tadLength
T **sPartialsRef = (T **) &sPartials;
aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tad->tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0)
result[r] = this->postProcess(sPartials[threadIdx.x], tad->tadLength, extraParams);
}
*/
Nd4jLong xCoord[MAX_RANK];
Nd4jLong yCoord[MAX_RANK];
auto yShape = shape::shapeOf(yTadOnlyShapeInfo);
auto yStride = shape::stride(yTadOnlyShapeInfo);
auto xShape = shape::shapeOf(tadOnlyShapeInfo);
auto xStride = shape::stride(tadOnlyShapeInfo);
int yRank = shape::rank(yTadOnlyShapeInfo);
int xRank = shape::rank(tadOnlyShapeInfo);
for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) {
auto xOffsetForTad = tadOffsets[i];
auto yOffsetForTad = yTadOffsets[i];
if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) {
extraZ[threadIdx.x] = (T) startingVal;
}
__syncthreads();
for(int j = threadIdx.x; j < tadLength; j += blockDim.x) {
shape::ind2subC(xRank,xShape, j, xCoord);
shape::ind2subC(yRank,yShape, j, yCoord);
auto xOffset = shape::getOffset(xOffsetForTad, xShape, xStride, xCoord, xRank);
auto yOffset = shape::getOffset(yOffsetForTad, yShape, yStride, yCoord, yRank);
sPartials[threadIdx.x] = j < blockDim.x ? OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ) : OpType::update(sPartials[threadIdx.x], OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ), extraZ);
}
__syncthreads();
T **sPartialsRef = (T **) &sPartials;
aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraZ);
__syncthreads();
if (threadIdx.x == 0)
result[i] = OpType::postProcess(sPartials[threadIdx.x],tadLength, extraZ);
__syncthreads();
}
}
}
}
#endif
#ifdef __CUDACC__
            /**
             * Device dispatcher: routes a TAD-based reduce3 call to the
             * concrete op's transform<OpType> selected by opNum (REDUCE3_OPS).
             */
            __device__
            static inline void exec(
                const int opNum,
                T *dx,
                Nd4jLong *xShapeInfo,
                T *dy,
                Nd4jLong *yShapeInfo,
                T *extraParams,
                T *result,
                Nd4jLong *resultShapeInfo,
                int *dimension,
                int dimensionLength,
                int postProcessOrNot,
                int *allocationPointer,
                UnifiedSharedMemory *manager,
                Nd4jLong *tadOnlyShapeInfo,
                Nd4jLong *tadOffsets,
                Nd4jLong *yTadOnlyShapeInfo,
                Nd4jLong *yTadOffsets) {
                            DISPATCH_BY_OPNUM(transform, PARAMS(dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), REDUCE3_OPS);
            }
            /**
             * Device dispatcher: routes an all-pairs reduce3 call to
             * transformAll<OpType> selected by opNum (REDUCE3_OPS).
             */
            __device__
            static inline void execAllCuda(
                const int opNum,
                T *dx,
                Nd4jLong *xShapeInfo,
                T *dy,
                Nd4jLong *yShapeInfo,
                T *extraParams,
                T *result,
                Nd4jLong *resultShapeInfo,
                int *dimension,
                int dimensionLength,
                int postProcessOrNot,
                int *allocationPointer,
                UnifiedSharedMemory *manager,
                Nd4jLong *tadOnlyShapeInfo,
                Nd4jLong *tadOffsets,
                Nd4jLong *yTadOnlyShapeInfo,
                Nd4jLong *yTadOffsets) {
                            DISPATCH_BY_OPNUM(transformAll, PARAMS(dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), REDUCE3_OPS);
            }
            /**
             * Device dispatcher: routes a scalar reduce3 call to
             * execScalarCuda<OpType> selected by opNum (REDUCE3_OPS).
             */
            __device__
            static inline void execScalarCuda(
                const int opNum,
                T *dx,
                Nd4jLong *xShapeInfo,
                T *dy,
                Nd4jLong *yShapeInfo,
                T *extraParams,
                T *result,
                Nd4jLong *resultShapeInfo,
                int * allocationPointer,
                T *reductionBuffer,
                UnifiedSharedMemory *manager,
                Nd4jLong *tadOnlyShapeInfo) {
                            DISPATCH_BY_OPNUM(execScalarCuda, PARAMS(dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, allocationPointer, reductionBuffer, manager, tadOnlyShapeInfo), REDUCE3_OPS);
            }
#endif
#ifdef __CUDACC__
        __host__
#endif
        /**
         * Host dispatcher: scalar reduce3 over two whole arrays; selects the
         * concrete op by opNum (REDUCE3_OPS) and returns its scalar result.
         */
        static T execScalar(
            const int opNum,
            T *x,
            Nd4jLong *xShapeInfo,
            T *extraParamsVals,
            T *y,
            Nd4jLong *yShapeInfo) {
            RETURNING_DISPATCH_BY_OPNUM(execScalar, PARAMS(x,
                                                           xShapeInfo,
                                                           extraParamsVals,
                                                           y,
                                                           yShapeInfo), REDUCE3_OPS);
        }
        /**
         * Host dispatcher: dimensional reduce3 without precomputed TAD info;
         * selects the concrete op by opNum (REDUCE3_OPS).
         */
        static void exec( const int opNum,
                          T *x, Nd4jLong *xShapeInfo,
                          T *extraParamsVals,
                          T *y,
                          Nd4jLong *yShapeInfo,
                          T *result,
                          Nd4jLong *resultShapeInfoBuffer,
                          int *dimension,
                          int dimensionLength) {
            DISPATCH_BY_OPNUM(exec, PARAMS(x,
                                           xShapeInfo,
                                           extraParamsVals,
                                           y, yShapeInfo,
                                           result,
                                           resultShapeInfoBuffer,
                                           dimension,
                                           dimensionLength), REDUCE3_OPS);
        }
        /**
         * Host dispatcher: dimensional reduce3 with precomputed TAD shape info
         * and offsets; selects the concrete op by opNum (REDUCE3_OPS).
         */
        static void exec( const int opNum,
                          T *x, Nd4jLong *xShapeInfo,
                          T *extraParamsVals,
                          T *y,
                          Nd4jLong *yShapeInfo,
                          T *result,
                          Nd4jLong *resultShapeInfoBuffer,
                          int *dimension,
                          int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
            DISPATCH_BY_OPNUM(exec, PARAMS(x,
                                           xShapeInfo,
                                           extraParamsVals,
                                           y, yShapeInfo,
                                           result,
                                           resultShapeInfoBuffer,
                                           dimension,
                                           dimensionLength, tadShapeInfo, tadOffsets), REDUCE3_OPS);
        }
        /**
         * Host dispatcher: all-pairs reduce3 (every x TAD against every
         * y TAD); selects the concrete op by opNum (REDUCE3_OPS).
         */
        static void execAll( const int opNum,
                             T *x,
                             Nd4jLong *xShapeInfo,
                             T *extraParamsVals,
                             T *y,
                             Nd4jLong *yShapeInfo,
                             T *result,
                             Nd4jLong *resultShapeInfoBuffer,
                             int *dimension,
                             int dimensionLength,
                             Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets,
                             Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) {
            DISPATCH_BY_OPNUM(execAll, PARAMS(x,
                                              xShapeInfo,
                                              extraParamsVals,
                                              y, yShapeInfo,
                                              result,
                                              resultShapeInfoBuffer,
                                              dimension,
                                              dimensionLength, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), REDUCE3_OPS);
        }
template<typename OpType>
#ifdef __CUDACC__
__host__
#endif
static T execScalar(
T *x,
Nd4jLong *xShapeInfo,
T *extraParams,
T *y,
Nd4jLong *yShapeInfo) {
T startingVal = OpType::startingValue(x);
Nd4jLong length = shape::length(xShapeInfo);
Nd4jLong xElementWiseStride = shape::elementWiseStride(xShapeInfo);
Nd4jLong yElementWiseStride = shape::elementWiseStride(yShapeInfo);
T extraParamsVals[3] = {(T) 0.0, (T) 0.0, (T) 0.0};
// it's possible case for EqualsWithEps op
if (extraParams != nullptr) {
extraParamsVals[2] = extraParams[0];
}
char xOrder = shape::order(xShapeInfo);
char yOrder = shape::order(yShapeInfo);
if(xOrder == yOrder && (xElementWiseStride >=1 && yElementWiseStride >= 1) && shape::strideDescendingCAscendingF(xShapeInfo) && shape::strideDescendingCAscendingF(yShapeInfo)) {
if (xElementWiseStride == 1 && yElementWiseStride == 1) {
// TODO:: proper reduction required here
for(int i = 0; i < length; i++) {
startingVal = OpType::update(startingVal,
OpType::op(x[i],y[i],
extraParamsVals),
extraParamsVals);
}
return OpType::postProcess(startingVal, length, extraParamsVals);
}
else {
// TODO:: proper reduction required here
for(Nd4jLong i = 0; i < length; i++) {
startingVal = OpType::update(startingVal, OpType::op(x[i * xElementWiseStride],y[i * yElementWiseStride], extraParamsVals), extraParamsVals);
}
return OpType::postProcess(startingVal, length, extraParamsVals);
}
}
else {
Nd4jLong xCoords[MAX_RANK];
Nd4jLong yCoords[MAX_RANK];
int xRank = shape::rank(xShapeInfo);
int yRank = shape::rank(yShapeInfo);
Nd4jLong *xShape = shape::shapeOf(xShapeInfo);
Nd4jLong *xStride = shape::stride(xShapeInfo);
Nd4jLong *yShape = shape::shapeOf(yShapeInfo);
Nd4jLong *yStride = shape::stride(yShapeInfo);
for(unsigned int i = 0 ;i < length; i++) {
shape::ind2subC(xRank, xShape, i, xCoords);
shape::ind2subC(yRank, yShape, i, yCoords);
Nd4jLong offset = shape::getOffset(0, xShape, xStride, xCoords, xRank);
Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, yCoords, yRank);
startingVal = OpType::update(startingVal, OpType::op(x[offset], y[yOffset], extraParamsVals), extraParamsVals);
}
}
return OpType::postProcess(startingVal, length, extraParamsVals);;
}
        /**
         * Host all-pairs reduce3: reduces every x TAD against every y TAD
         * along the given dimensions, writing into
         * result[xTad * yTads + yTad].
         * NOTE(review): result elements are combined via OpType::update into
         * their existing values -- caller presumably pre-initializes result;
         * confirm.
         */
        template<typename OpType>
        static void execAll(
                T *x,
                Nd4jLong *xShapeInfo,
                T *extraParams,
                T *y,
                Nd4jLong *yShapeInfo,
                T *result,
                Nd4jLong *resultShapeInfoBuffer,
                int *dimension,
                int dimensionLength, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) {
            auto xTadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
            auto yTadLength = shape::tadLength(yShapeInfo, dimension, dimensionLength);
            auto xTads = shape::length(xShapeInfo) / xTadLength;
            auto yTads = shape::length(yShapeInfo) / yTadLength;
            auto xShape = shape::shapeOf(xTadShapeInfo);
            auto xStride = shape::stride(xTadShapeInfo);
            int xRank = shape::rank(xTadShapeInfo);
            auto yShape = shape::shapeOf(yTadShapeInfo);
            auto yStride = shape::stride(yTadShapeInfo);
            int yRank = shape::rank(yTadShapeInfo);
            Nd4jLong xCoord[MAX_RANK];
            Nd4jLong yCoord[MAX_RANK];
            T startingVal = OpType::startingValue(x);
            // parallel over x TADs; each (r, g) pair writes a distinct result[ri],
            // coordinate scratch arrays are thread-private
#pragma omp parallel for proc_bind(AFFINITY) default(shared) private(xCoord, yCoord)
            for (Nd4jLong r = 0; r < xTads; r++) {
                Nd4jLong xOffset = xOffsets[r];
                T *lX = x + xOffset;
                for (Nd4jLong g = 0; g < yTads; g++) {
                    auto yOffset = yOffsets[g];
                    T *lY = y + yOffset;
                    auto ri = (r * yTads) + g;
                    // per-pair extra-params scratch, seeded with startingVal
                    T *localExtraParams = nullptr;
                    if (OpType::extraParamsLen > 0)
                        localExtraParams = new T[OpType::extraParamsLen];
                    for (int extraParamsIdx = 0; extraParamsIdx < OpType::extraParamsLen; extraParamsIdx++) {
                        localExtraParams[extraParamsIdx] = startingVal;
                    }
                    for (int f = 0; f < xTadLength; f++) {
                        if (shape::order(yTadShapeInfo) == 'c') {
                            shape::ind2subC(yRank, yShape, f, yCoord);
                        } else {
                            shape::ind2sub(yRank, yShape, f, yCoord);
                        }
                        if (shape::order(xTadShapeInfo) == 'c') {
                            shape::ind2subC(xRank, xShape, f, xCoord);
                        } else {
                            shape::ind2sub(xRank, xShape, f, xCoord);
                        }
                        Nd4jLong xO = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                        Nd4jLong yO = shape::getOffset(0, yShape, yStride, yCoord, yRank);
                        result[ri] = OpType::update(result[ri], OpType::op(lX[xO], lY[yO], localExtraParams), localExtraParams);
                    }
                    result[ri] = OpType::postProcess(result[ri], xTadLength, localExtraParams);
                    if (localExtraParams != nullptr)
                        delete[] localExtraParams;
                }
            }
        }
template<typename OpType>
// Reduce3 along the given dimension(s), using precomputed TAD
// (tensor-along-dimension) info for x.
//
// For every TAD r of x, accumulates op(x[tad r][f], y[f]) over
// f in [0, tadLength) and writes the post-processed value into result[r].
//
// x / xShapeInfo         -- first input buffer and its shape descriptor
// extraParams            -- caller-supplied op params; not used directly here,
//                           a per-TAD scratch buffer seeded with startingValue
//                           is allocated instead
// y / yShapeInfo         -- second input and its shape descriptor.
//                           NOTE(review): y is addressed with coordinates built
//                           from the *full* yShapeInfo for f < tadLength, so y
//                           is assumed to hold exactly one TAD's worth of
//                           elements -- confirm against callers.
// result                 -- output, one entry per x TAD
// resultShapeInfoBuffer  -- output shape descriptor (unused in the body)
// dimension / dimensionLength -- dimensions being reduced over
// tadShapeInfo / tadOffsets   -- precomputed TAD shape and per-TAD offsets for x
static void exec(
        T *x,
        Nd4jLong *xShapeInfo,
        T *extraParams,
        T *y,
        Nd4jLong *yShapeInfo,
        T *result,
        Nd4jLong *resultShapeInfoBuffer,
        int *dimension,
        int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
    /*
    nd4j_printf("Xp: [%p]; Yp: [%p]; Zp: [%p];\n", (void *) x, (void *) y, (void *) result);
    nd4j_printf("XSp: [%p]; YSp: [%p]; ZSp: [%p];\n", (void *) xShapeInfo, (void *) yShapeInfo, (void *) resultShapeInfoBuffer);
    nd4j_printf("Ep: [%p]; Dp: [%p]\n", (void *) extraParams, (void *) dimension);
    nd4j_printf("TSp: [%p]; TOp: [%p]\n", (void *) tadShapeInfo, (void *) tadOffsets);
    nd4j_printf("X[0]: %f\n", x[0]);
    nd4j_printf("Y[0]: %f\n", y[0]);
    nd4j_printf("Z[0]: %f\n", result[0]);
    nd4j_printf("XS[0]: %i\n", xShapeInfo[0]);
    nd4j_printf("YS[0]: %i\n", yShapeInfo[0]);
    nd4j_printf("ZS[0]: %i\n", resultShapeInfoBuffer[0]);
    nd4j_printf("E[0]: %f\n", extraParams[0]);
    nd4j_printf("D[0]: %i\n", dimension[0]);
    nd4j_printf("TS[0]: %i\n", tadShapeInfo[0]);
    nd4j_printf("TO[0]: %lld\n", tadOffsets[0]);
    nd4j_printf("dimLength: %i\n", dimensionLength);
    */
    // Seed value for the per-TAD extra-params scratch buffer.
    T startingVal = OpType::startingValue(x);

    auto tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
    auto tads = shape::length(xShapeInfo) / tadLength;  // number of TADs in x

    // Per-TAD shape/stride for addressing x elements inside one TAD.
    auto *xShape = shape::shapeOf(tadShapeInfo);
    auto *xStride = shape::stride(tadShapeInfo);
    int xRank = shape::rank(tadShapeInfo);

    // y is addressed through its full shape descriptor (see NOTE above).
    auto *yShape = shape::shapeOf(yShapeInfo);
    auto *yStride = shape::stride(yShapeInfo);
    int yRank = shape::rank(yShapeInfo);

    //shape::printShapeInfoLinear(xShapeInfo);
    //shape::printShapeInfoLinear(yShapeInfo);
    //shape::printShapeInfoLinear(resultShapeInfoBuffer);
    //shape::printShapeInfoLinear(tadShapeInfo);

    // Scratch coordinate buffers reused for every element.
    Nd4jLong xCoord[MAX_RANK];
    Nd4jLong yCoord[MAX_RANK];

    //#pragma omp parallel for proc_bind(AFFINITY) default(shared)
    for (Nd4jLong r = 0; r < tads; r++) {
        Nd4jLong offset = tadOffsets[r];

        // Per-TAD accumulation scratch, seeded with the op's starting value.
        T *localExtraParams = nullptr;
        if (OpType::extraParamsLen > 0)
            localExtraParams = new T[OpType::extraParamsLen];
        for (int extraParamsIdx = 0; extraParamsIdx < OpType::extraParamsLen; extraParamsIdx++) {
            localExtraParams[extraParamsIdx] = startingVal;
        }

        for (Nd4jLong f = 0; f < tadLength; f++) {
            // Decompose the linear index f into coordinates for both operands,
            // honoring the TAD's storage order.
            if (shape::order(tadShapeInfo) == 'c') {
                shape::ind2subC(xRank, xShape, f, xCoord);
                shape::ind2subC(yRank, yShape, f, yCoord);
            } else {
                shape::ind2sub(xRank, xShape, f, xCoord);
                shape::ind2sub(yRank, yShape, f, yCoord);
            }

            // x offset is relative to this TAD's base; y is absolute (base 0).
            Nd4jLong xOffset = shape::getOffset(offset, xShape, xStride, xCoord, xRank);
            Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank);

            result[r] = OpType::update(result[r], OpType::op(x[xOffset], y[yOffset], localExtraParams), localExtraParams);
        }

        result[r] = OpType::postProcess(result[r], tadLength, localExtraParams);

        if (localExtraParams != nullptr)
            delete[] localExtraParams;
    }
}
template<typename OpType>
// Reduce3 (pairwise accumulating reduction of x against y) along the given
// dimension(s), producing one value per TAD (tensor-along-dimension).
//
// x / xShapeInfo                 -- first input and its shape descriptor
// extraParams                    -- caller-supplied op parameters
// y / yShapeInfo                 -- second input and its shape descriptor
// result / resultShapeInfoBuffer -- output buffer and its shape descriptor;
//                                   a scalar result delegates to execScalar
// dimension / dimensionLength    -- dimensions being reduced over
//
// Fixes vs. the previous revision:
//  * shape::ind2subC was handed xTad.tadStride where it expects a *shape*
//    (the mirror !xTadBigger branch correctly passes yTad.tadShape); it now
//    receives xTad.tadShape.
//  * int loop counters / index temporaries compared against Nd4jLong lengths
//    and offsets were widened to Nd4jLong to avoid overflow on large arrays.
static void exec(
        T *x,
        Nd4jLong *xShapeInfo,
        T *extraParams,
        T *y,
        Nd4jLong *yShapeInfo,
        T *result,
        Nd4jLong *resultShapeInfoBuffer,
        int *dimension,
        int dimensionLength) {

    // Scratch accumulator handed to update()/postProcess(); reduce3 ops use
    // up to three extra accumulation slots.
    T extraParamsVals[3] = {(T) 0.0, (T) 0.0, (T) 0.0};

    // Scalar output: reduce the whole arrays in one shot.
    if (shape::isScalar(resultShapeInfoBuffer)) {
        result[0] = execScalar<OpType>(
                x,
                xShapeInfo,
                extraParamsVals,
                y,
                yShapeInfo);
        return;
    }

    char xOrder = shape::order(xShapeInfo);
    char yOrder = shape::order(yShapeInfo);

    if (xOrder != yOrder) {
        // Mixed storage orders: walk both arrays with a raw dual iterator and
        // bucket every element pair into its reduction index.
        Nd4jLong shapeIter[MAX_RANK];
        Nd4jLong coord[MAX_RANK];
        int dim;
        Nd4jLong xStridesIter[MAX_RANK];
        Nd4jLong yStridesIter[MAX_RANK];

        auto xShape = shape::shapeOf(xShapeInfo);
        auto xStride = shape::stride(xShapeInfo);
        auto yStride = shape::stride(yShapeInfo);
        int rank = shape::rank(xShapeInfo);

        if (PrepareTwoRawArrayIter<T>(rank,
                                      xShape,
                                      x,
                                      xStride,
                                      y,
                                      yStride,
                                      &rank,
                                      shapeIter,
                                      &x,
                                      xStridesIter,
                                      &y,
                                      yStridesIter) >= 0) {

            auto resultLength = shape::length(resultShapeInfoBuffer);
            auto tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);

            ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
                // Map the element's linear offset onto its reduction bucket.
                Nd4jLong xOffset = shape::getOffset(0, xShape, xStride, coord, rank);
                auto reductionIndex = xOffset / resultLength;
                result[reductionIndex] = OpType::update(result[reductionIndex],
                                                        OpType::op(x[0], y[0], extraParamsVals),
                                                        extraParamsVals);
            } ND4J_RAW_ITER_TWO_NEXT(dim,
                                     rank,
                                     coord,
                                     shapeIter,
                                     x,
                                     xStridesIter,
                                     y,
                                     yStridesIter);

            for (Nd4jLong i = 0; i < resultLength; i++) {
                result[i] = OpType::postProcess(result[i], tadLength, extraParamsVals);
            }
        } else {
            printf("Unable to prepare array\n");
        }
    } else {
        T startingVal = OpType::startingValue(x);
        Nd4jLong resultLength = shape::length(resultShapeInfoBuffer);

        shape::TAD xTad(xShapeInfo, dimension, dimensionLength);
        xTad.createTadOnlyShapeInfo();
        xTad.createOffsets();

        shape::TAD yTad(yShapeInfo, dimension, dimensionLength);
        yTad.createTadOnlyShapeInfo();
        yTad.createOffsets();

        /**
         * The element wise stride belongs to a reduction index.
         * When used out of order, we can get rid of the data
         * dependencies and rely on using the max dimension
         * specified for stride instead.
         * Say we take the sum(0,1) along arr: we can use
         * arr.stride(1) as a representation along which to iterate.
         */
        int largerElementWiseStride;
        int smallerElementWiseStride;
        auto xElementWiseStride = shape::elementWiseStride(xTad.tadOnlyShapeInfo);
        auto yElementWiseStride = shape::elementWiseStride(yTad.tadOnlyShapeInfo);
        int tadLength;
        Nd4jLong xModLength;
        Nd4jLong yModLength;
        Nd4jLong *iterationTadInfo;
        bool xTadBigger;

        // Tile whichever operand is larger into TADs; the smaller one is
        // addressed through its full shape descriptor.
        if (shape::length(xShapeInfo) > shape::length(yShapeInfo)) {
            tadLength = shape::length(xTad.tadOnlyShapeInfo);
            iterationTadInfo = xTad.tadOnlyShapeInfo;
            largerElementWiseStride = shape::elementWiseStride(xShapeInfo);
            smallerElementWiseStride = shape::elementWiseStride(yShapeInfo);
            xModLength = 1;
            yModLength = tadLength;
            xTadBigger = true;
        } else {
            tadLength = shape::length(yTad.tadOnlyShapeInfo);
            iterationTadInfo = yTad.tadOnlyShapeInfo;
            largerElementWiseStride = shape::elementWiseStride(yShapeInfo);
            smallerElementWiseStride = shape::elementWiseStride(xShapeInfo);
            xModLength = tadLength;
            yModLength = 1;
            xTadBigger = false;
        }

        if (largerElementWiseStride >= 1 && smallerElementWiseStride >= 1
                && xElementWiseStride >= 1 && yElementWiseStride >= 1) {
            if (shape::length(xShapeInfo) == shape::length(yShapeInfo)) {
                // Equal-length operands: both have valid per-TAD offsets, so walk
                // matched TAD pairs linearly using their element-wise strides.
                for (Nd4jLong i = 0; i < resultLength; i++) {
                    T *localExtraParams = nullptr;
                    if (OpType::extraParamsLen > 0)
                        localExtraParams = new T[OpType::extraParamsLen];
                    for (int extraParamsIdx = 0; extraParamsIdx < OpType::extraParamsLen; extraParamsIdx++) {
                        localExtraParams[extraParamsIdx] = startingVal;
                    }

                    Nd4jLong offset = xTad.tadOffsets[i];
                    Nd4jLong yOffset = yTad.tadOffsets[i];
                    result[i] = OpType::op(x[offset], y[yOffset], localExtraParams);

                    for (int j = 1; j < tadLength; j++) {
                        // Nd4jLong (was int) so large base offsets cannot overflow.
                        Nd4jLong xIdx = (offset + xElementWiseStride * j);
                        Nd4jLong yIdx = (yOffset + yElementWiseStride * j);
                        result[i] = OpType::update(result[i], OpType::op(x[xIdx],
                                                                         y[yIdx],
                                                                         localExtraParams), localExtraParams);
                    }

                    result[i] = OpType::postProcess(result[i], tadLength, localExtraParams);

                    if (localExtraParams != nullptr)
                        delete[] localExtraParams;
                }
            } else {
                int tadsPerThread = resultLength / TAD_THRESHOLD;
                int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
                num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

                for (Nd4jLong i = 0; i < resultLength; i++) {
                    // Only the bigger operand has per-TAD offsets; the smaller
                    // one is indexed from base 0 with its full shape/stride.
                    Nd4jLong xOffset = xTadBigger ? xTad.tadOffsets[i] : 0;
                    Nd4jLong yOffset = !xTadBigger ? yTad.tadOffsets[i] : 0;
                    auto xShape = xTadBigger ? xTad.tadShape : shape::shapeOf(xShapeInfo);
                    auto yShape = !xTadBigger ? yTad.tadShape : shape::shapeOf(yShapeInfo);
                    auto xStride = xTadBigger ? xTad.tadStride : shape::stride(xShapeInfo);
                    auto yStride = !xTadBigger ? yTad.tadStride : shape::stride(yShapeInfo);
                    int xRank = xTadBigger ? shape::rank(xTad.tadOnlyShapeInfo) : shape::rank(xShapeInfo);
                    int yRank = !xTadBigger ? shape::rank(yTad.tadOnlyShapeInfo) : shape::rank(yShapeInfo);

                    Nd4jLong coord[MAX_RANK];
                    Nd4jLong yCoord[MAX_RANK];
                    T start = OpType::startingValue(x);

                    for (int j = 0; j < tadLength; j++) {
                        if (xTadBigger) {
                            // BUGFIX: ind2subC expects a shape array; the previous
                            // revision passed xTad.tadStride here.
                            shape::ind2subC(shape::rank(xTad.tadOnlyShapeInfo),
                                            xTad.tadShape, j, coord);
                            shape::ind2subC(shape::rank(yShapeInfo),
                                            shape::shapeOf(yShapeInfo), j, yCoord);
                        } else {
                            shape::ind2subC(shape::rank(xShapeInfo), shape::shapeOf(xShapeInfo), j, coord);
                            shape::ind2subC(shape::rank(yTad.tadOnlyShapeInfo),
                                            yTad.tadShape, j, yCoord);
                        }

                        Nd4jLong xOffset2 = shape::getOffset(xOffset, xShape, xStride, coord, xRank);
                        Nd4jLong yOffset2 = shape::getOffset(yOffset, yShape, yStride, yCoord, yRank);
                        // NOTE(review): op() receives the caller's extraParams while
                        // update() and the sibling branches use extraParamsVals --
                        // preserved as-is; confirm the intended semantics.
                        start = OpType::update(start, OpType::op(x[xOffset2], y[yOffset2], extraParams), extraParamsVals);
                    }

                    result[i] = OpType::postProcess(start, shape::length(iterationTadInfo), extraParamsVals);
                }
            }
        } else {
            // No usable element-wise strides: resolve every element's offset
            // from its coordinates via the TAD shape descriptors.
            shape::TAD xTad(xShapeInfo, dimension, dimensionLength);
            xTad.createTadOnlyShapeInfo();
            xTad.createOffsets();

            shape::TAD yTad(yShapeInfo, dimension, dimensionLength);
            yTad.createTadOnlyShapeInfo();
            yTad.createOffsets();

            int tadsPerThread = resultLength / TAD_THRESHOLD;
            int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
            num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

            Nd4jLong coord[MAX_RANK];

            for (Nd4jLong i = 0; i < resultLength; i++) {
                Nd4jLong xOffset = xTad.tadOffsets[i];
                Nd4jLong yOffset = yTad.tadOffsets[i];
                T start = OpType::startingValue(x + xOffset);

                for (int j = 0; j < tadLength; j++) {
                    shape::ind2subC(shape::rank(iterationTadInfo), shape::shapeOf(iterationTadInfo), j, coord);
                    Nd4jLong xOffset2 = shape::getOffset(xOffset, shape::shapeOf(xTad.tadOnlyShapeInfo), shape::stride(xTad.tadOnlyShapeInfo), coord, shape::rank(xTad.tadOnlyShapeInfo));
                    Nd4jLong yOffset2 = shape::getOffset(yOffset, shape::shapeOf(yTad.tadOnlyShapeInfo), shape::stride(yTad.tadOnlyShapeInfo), coord, shape::rank(yTad.tadOnlyShapeInfo));
                    start = OpType::update(start, OpType::op(x[xOffset2], y[yOffset2], extraParamsVals), extraParamsVals);
                }

                result[i] = OpType::postProcess(start, shape::length(iterationTadInfo), extraParamsVals);
            }
        }
    }
}
};
}
}
#ifdef __CUDACC__
/**
* The driver api
* @param opNum the number
* @param n the length of the reduce
* @param dx the input data
* @param xShapeInfo the shape information
* @param dy the pair wise reduce
* @param yShapeInfo the shape information for y
* @param extraParams the extra parameters in the operation
* @param result where to store the result
* @param resultShapeInfo the shape information
* @param gpuInformation the gpu information
* @param dimension the dimension to reduce along long
* @param dimensionLength the dimension length
* @param postProcessOrNot whether to post
*/
template <typename T>
// Device-side dispatcher for the dimension-wise reduce3 kernel.
//
// Thread 0 of each block placement-constructs the UnifiedSharedMemory manager
// inside the block's dynamic shared memory and initializes it; every thread
// then synchronizes before forwarding to the Reduce3 implementation selected
// by opNum.
__device__ void reduce3Generic(const int opNum,
                               T *dx, Nd4jLong *xShapeInfo,
                               T *dy, Nd4jLong *yShapeInfo,
                               T *extraParams,
                               T *result, Nd4jLong *resultShapeInfo,
                               int *dimension, int dimensionLength,
                               int postProcessOrNot, int *allocationPointer,
                               Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                               Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0,
                      sizeof(functions::reduce3::Reduce3<T>),
                      sizeof(shape::TAD), shape::rank(xShapeInfo));
    }
    __syncthreads();

    functions::reduce3::Reduce3<T>::exec(
            opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
            result, resultShapeInfo, dimension, dimensionLength,
            postProcessOrNot, allocationPointer, manager,
            tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
}
template <typename T>
// Device-side dispatcher for the all-pairs (every x TAD vs every y TAD)
// reduce3 variant. Shared-memory manager setup is identical to
// reduce3Generic; the call is routed to execAllCuda instead.
__device__ void reduce3AllGeneric(const int opNum,
                                  T *dx, Nd4jLong *xShapeInfo,
                                  T *dy, Nd4jLong *yShapeInfo,
                                  T *extraParams,
                                  T *result, Nd4jLong *resultShapeInfo,
                                  int *dimension, int dimensionLength,
                                  int postProcessOrNot,
                                  int *allocationPointer,
                                  Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                                  Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0,
                      sizeof(functions::reduce3::Reduce3<T>),
                      sizeof(shape::TAD), shape::rank(xShapeInfo));
    }
    __syncthreads();

    functions::reduce3::Reduce3<T>::execAllCuda(
            opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
            result, resultShapeInfo, dimension, dimensionLength,
            postProcessOrNot, allocationPointer, manager,
            tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
}
template <typename T>
// Device-side dispatcher for the scalar-output reduce3 variant.
//
// Note: tadOffsets, yTadOnlyShapeInfo and yTadOffsets are accepted (keeping
// the launch signature uniform with the other variants) but are not forwarded
// to execScalarCuda, which only needs tadOnlyShapeInfo.
__device__ void reduce3ScalarGeneric(int opNum,
                                     T *dx, Nd4jLong *xShapeInfo,
                                     T *dy, Nd4jLong *yShapeInfo,
                                     T *extraParams,
                                     T *result, Nd4jLong *resultShapeInfo,
                                     int *allocationPointer, T *reductionBuffer,
                                     Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                                     Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0,
                      sizeof(functions::reduce3::Reduce3<T>),
                      sizeof(shape::TAD), shape::rank(xShapeInfo));
    }
    __syncthreads();

    functions::reduce3::Reduce3<T>::execScalarCuda(
            opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
            result, resultShapeInfo, allocationPointer, reductionBuffer,
            manager, tadOnlyShapeInfo);
}
/**
* The driver api
* @param opNum the number
* @param n the length of the reduce
* @param dx the input data
* @param xShapeInfo the shape information
* @param dy the pair wise reduce
* @param yShapeInfo the shape information for y
* @param extraParams the extra parameters in the operation
* @param result where to store the result
* @param resultShapeInfo the shape information
* @param dimension the dimension to reduce along long
* @param dimensionLength the dimension length
* @param postProcessOrNot whether to post [
*/
extern "C"
// Host-launchable kernel: dimension-wise reduce3 for double precision.
__global__ void reduce3Double(int opNum,
                              double *dx, Nd4jLong *xShapeInfo,
                              double *dy, Nd4jLong *yShapeInfo,
                              double *extraParams,
                              double *result, Nd4jLong *resultShapeInfo,
                              int *dimension, int dimensionLength,
                              int postProcessOrNot, int *allocationPointer,
                              Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                              Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    // Thin wrapper: all work happens in the templated device function.
    reduce3Generic<double>(opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
                           result, resultShapeInfo, dimension, dimensionLength,
                           postProcessOrNot, allocationPointer,
                           tadOnlyShapeInfo, tadOffsets,
                           yTadOnlyShapeInfo, yTadOffsets);
}
extern "C"
// Host-launchable kernel: all-pairs reduce3 for double precision.
__global__ void reduce3AllDouble(int opNum,
                                 double *dx, Nd4jLong *xShapeInfo,
                                 double *dy, Nd4jLong *yShapeInfo,
                                 double *extraParams,
                                 double *result, Nd4jLong *resultShapeInfo,
                                 int *dimension, int dimensionLength,
                                 int postProcessOrNot, int *allocationPointer,
                                 Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                                 Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    // Thin wrapper: all work happens in the templated device function.
    reduce3AllGeneric<double>(opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
                              result, resultShapeInfo, dimension, dimensionLength,
                              postProcessOrNot, allocationPointer,
                              tadOnlyShapeInfo, tadOffsets,
                              yTadOnlyShapeInfo, yTadOffsets);
}
/**
* The driver api
* @param opNum the number
* @param n the length of the reduce
* @param dx the input data
* @param xShapeInfo the shape information
* @param dy the pair wise reduce
* @param yShapeInfo the shape information for y
* @param extraParams the extra parameters in the operation
* @param result where to store the result
* @param resultShapeInfo the shape information
* @param gpuInformation the gpu information
* @param dimension the dimension to reduce along long
* @param dimensionLength the dimension length
* @param postProcessOrNot whether to post [
*/
extern "C"
// Host-launchable kernel: dimension-wise reduce3 for single precision.
__global__ void reduce3Float(int opNum,
                             float *dx, Nd4jLong *xShapeInfo,
                             float *dy, Nd4jLong *yShapeInfo,
                             float *extraParams,
                             float *result, Nd4jLong *resultShapeInfo,
                             int *dimension, int dimensionLength,
                             int postProcessOrNot, int *allocationPointer,
                             Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                             Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    // Thin wrapper: all work happens in the templated device function.
    reduce3Generic<float>(opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
                          result, resultShapeInfo, dimension, dimensionLength,
                          postProcessOrNot, allocationPointer,
                          tadOnlyShapeInfo, tadOffsets,
                          yTadOnlyShapeInfo, yTadOffsets);
}
extern "C"
// Host-launchable kernel: all-pairs reduce3 for single precision.
__global__ void reduce3AllFloat(int opNum,
                                float *dx, Nd4jLong *xShapeInfo,
                                float *dy, Nd4jLong *yShapeInfo,
                                float *extraParams,
                                float *result, Nd4jLong *resultShapeInfo,
                                int *dimension, int dimensionLength,
                                int postProcessOrNot, int *allocationPointer,
                                Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                                Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    // Thin wrapper: all work happens in the templated device function.
    reduce3AllGeneric<float>(opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
                             result, resultShapeInfo, dimension, dimensionLength,
                             postProcessOrNot, allocationPointer,
                             tadOnlyShapeInfo, tadOffsets,
                             yTadOnlyShapeInfo, yTadOffsets);
}
extern "C"
// Host-launchable kernel: dimension-wise reduce3 for half precision.
__global__ void reduce3Half(int opNum,
                            float16 *dx, Nd4jLong *xShapeInfo,
                            float16 *dy, Nd4jLong *yShapeInfo,
                            float16 *extraParams,
                            float16 *result, Nd4jLong *resultShapeInfo,
                            int *dimension, int dimensionLength,
                            int postProcessOrNot, int *allocationPointer,
                            Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                            Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    // Thin wrapper: all work happens in the templated device function.
    reduce3Generic<float16>(opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
                            result, resultShapeInfo, dimension, dimensionLength,
                            postProcessOrNot, allocationPointer,
                            tadOnlyShapeInfo, tadOffsets,
                            yTadOnlyShapeInfo, yTadOffsets);
}
extern "C"
// Host-launchable kernel: all-pairs reduce3 for half precision.
__global__ void reduce3AllHalf(int opNum,
                               float16 *dx, Nd4jLong *xShapeInfo,
                               float16 *dy, Nd4jLong *yShapeInfo,
                               float16 *extraParams,
                               float16 *result, Nd4jLong *resultShapeInfo,
                               int *dimension, int dimensionLength,
                               int postProcessOrNot, int *allocationPointer,
                               Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                               Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    // Thin wrapper: all work happens in the templated device function.
    reduce3AllGeneric<float16>(opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
                               result, resultShapeInfo, dimension, dimensionLength,
                               postProcessOrNot, allocationPointer,
                               tadOnlyShapeInfo, tadOffsets,
                               yTadOnlyShapeInfo, yTadOffsets);
}
extern "C"
// Host-launchable kernel: scalar-output reduce3 for single precision.
// dimension/dimensionLength/postProcessOrNot are part of the uniform launch
// signature but are not used by the scalar path.
__global__ void reduce3ScalarFloat(int opNum,
                                   float *dx, Nd4jLong *xShapeInfo,
                                   float *dy, Nd4jLong *yShapeInfo,
                                   float *extraParams,
                                   float *result, Nd4jLong *resultShapeInfo,
                                   int *dimension, int dimensionLength,
                                   int postProcessOrNot, int *allocationPointer,
                                   float *reductionBuffer,
                                   Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                                   Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3ScalarGeneric<float>(opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
                                result, resultShapeInfo, allocationPointer,
                                reductionBuffer, tadOnlyShapeInfo, tadOffsets,
                                yTadOnlyShapeInfo, yTadOffsets);
}
extern "C"
// Host-launchable kernel: scalar-output reduce3 for half precision.
// dimension/dimensionLength/postProcessOrNot are part of the uniform launch
// signature but are not used by the scalar path.
__global__ void reduce3ScalarHalf(int opNum,
                                  float16 *dx, Nd4jLong *xShapeInfo,
                                  float16 *dy, Nd4jLong *yShapeInfo,
                                  float16 *extraParams,
                                  float16 *result, Nd4jLong *resultShapeInfo,
                                  int *dimension, int dimensionLength,
                                  int postProcessOrNot, int *allocationPointer,
                                  float16 *reductionBuffer,
                                  Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                                  Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3ScalarGeneric<float16>(opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
                                  result, resultShapeInfo, allocationPointer,
                                  reductionBuffer, tadOnlyShapeInfo, tadOffsets,
                                  yTadOnlyShapeInfo, yTadOffsets);
}
extern "C"
// Host-launchable kernel: scalar-output reduce3 for double precision.
// dimension/dimensionLength/postProcessOrNot are part of the uniform launch
// signature but are not used by the scalar path.
__global__ void reduce3ScalarDouble(int opNum,
                                    double *dx, Nd4jLong *xShapeInfo,
                                    double *dy, Nd4jLong *yShapeInfo,
                                    double *extraParams,
                                    double *result, Nd4jLong *resultShapeInfo,
                                    int *dimension, int dimensionLength,
                                    int postProcessOrNot, int *allocationPointer,
                                    double *reductionBuffer,
                                    Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                                    Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3ScalarGeneric<double>(opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
                                 result, resultShapeInfo, allocationPointer,
                                 reductionBuffer, tadOnlyShapeInfo, tadOffsets,
                                 yTadOnlyShapeInfo, yTadOffsets);
}
#endif
#endif /* REDUCE3_H_ */
|
omp_parallel_for_if.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for if directive. Needs at least two threads.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for if</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_for_if</ompts:testcode:functionname>(FILE * logFile){
    /* Checks the if() clause on "omp parallel for": control is 0, so the
     * clause evaluates false and the region must execute serially (exactly
     * one thread) while still producing the correct sum.
     * Returns 1 on success, 0 on failure. */
    int known_sum;
    <ompts:orphan:vars>
    int num_threads;
    int sum, sum2;  /* sum2 is part of the testsuite variable template; unused here */
    int i;
    int control;
    </ompts:orphan:vars>
    control = 0;    /* makes "if (control==1)" false -> serial execution expected */
    num_threads=0;
    sum = 0;
    sum2 = 0;
#pragma omp parallel for private(i) <ompts:check>if (control==1)</ompts:check>
    <ompts:orphan>
    for (i=0; i <= LOOPCOUNT; i++)
    {
        num_threads = omp_get_num_threads();
        sum = sum + i;
    } /*end of for*/
    </ompts:orphan>
    /* Gauss sum of 0..LOOPCOUNT */
    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    fprintf (logFile, "Number of threads determined by omp_get_num_threads: %d\n", num_threads);
    /* Pass only if the sum is correct AND the loop ran on a single thread. */
    return (known_sum == sum && num_threads == 1);
} /* end of omp_parallel_for_if */
</ompts:testcode>
</ompts:test>
|
GB_subassign_19.c | //------------------------------------------------------------------------------
// GB_subassign_19: C(I,J)<!M,repl> += scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 19: C(I,J)<!M,repl> += scalar ; using S
// M: present
// Mask_comp: true
// C_replace: true
// accum: present
// A: scalar
// S: constructed
// C: not bitmap
// M: not bitmap
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_19            // C(I,J)<!M,repl> += scalar ; using S
(
    GrB_Matrix C,                   // in/out: matrix being assigned into
    // input:
    const GrB_Index *I,             // row index list (interpreted via Ikind/Icolon)
    const int64_t ni,               // length of I
    const int64_t nI,               // number of rows addressed
    const int Ikind,                // GB_LIST / GB_RANGE / GB_STRIDE / GB_ALL
    const int64_t Icolon [3],       // begin:inc:end when I is a range/stride
    const GrB_Index *J,             // column index list (interpreted via Jkind/Jcolon)
    const int64_t nj,               // length of J
    const int64_t nJ,               // number of columns addressed
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,             // mask, used complemented (Mask_comp is true)
    const bool Mask_struct,         // if true, use structure of M only
    const GrB_BinaryOp accum,       // accumulator applied to existing entries
    const void *scalar,             // scalar being accumulated into C(I,J)
    const GrB_Type atype,           // type of the scalar
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    // S records where C(I,J) already has entries, so present/absent cases
    // can be distinguished without searching C per element.
    GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;
    GB_GET_C ;      // C must not be bitmap
    const int64_t *GB_RESTRICT Ch = C->h ;
    const int64_t *GB_RESTRICT Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_MASK ;
    GB_GET_S ;
    GB_GET_ACCUM_SCALAR ;

    //--------------------------------------------------------------------------
    // Method 19: C(I,J)<!M,repl> += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
    // required.  The sparsity of !M cannot be exploited.
    // Methods 13, 15, 17, and 19 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M,repl> += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // accumulate the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // both S (i,j) and A (i,j) present
                        GB_C_S_LOOKUP ;
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =C+A ): apply accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_withaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // new entry: counted now, inserted in phase 2
                            task_pending++ ;
                        }
                    }
                }
            }
        }
        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M,repl> += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // accumulate the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // entry already handled in phase 1; just advance S
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }
        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
|
trmv_c_csr_n_lo_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include<stdlib.h>
static alphasparse_status_t
/* Triangular matrix-vector product on the lower triangle of a CSR matrix with
 * conjugation (OpenMP variant): y = beta*y + alpha * op(tril(A)) * x.
 *
 * The inner scatter writes y_local[tid][col] += alpha * conj(A[i][col]) * x[i]
 * for col <= i, so op appears to act as the conjugate transpose of the lower
 * triangle (non-unit diagonal) -- NOTE(review): confirm against the library's
 * operation naming convention.
 *
 * Each thread accumulates into its own y_local buffer to avoid races on y;
 * the per-thread buffers are reduced into y afterwards and freed.
 *
 * Returns ALPHA_SPARSE_STATUS_INVALID_VALUE for non-square A, otherwise
 * ALPHA_SPARSE_STATUS_SUCCESS. */
trmv_x_csr_n_lo_conj_omp(const ALPHA_Number alpha,
              const ALPHA_SPMAT_CSR *A,
              const ALPHA_Number *x,
              const ALPHA_Number beta,
              ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* triangular solve/product requires a square matrix */
    if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* y *= beta */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mule(y[i], beta);
    }

    /* One zero-initialized scratch vector per thread; results are merged
     * below.  NOTE(review): alpha_memalign results are not checked for NULL --
     * an allocation failure here would crash; consider handling upstream. */
    ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
        memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_Number tmp;
        for(ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ++ai)
        {
            const ALPHA_INT col = A->col_indx[ai];
            /* only the lower triangle (col <= i) participates */
            if(col <= i)
            {
                alpha_setzero(tmp);
                cmp_conj(tmp, A->values[ai]);       /* tmp = conj(A[i][col]) */
                alpha_mul(tmp, alpha, tmp);         /* tmp = alpha * tmp     */
                /* scatter into column index -> transpose-style accumulation */
                alpha_madde(y_local[tid][col], tmp, x[i]);
            }
        }
    }

    /* Reduce the per-thread partials into y (parallel over rows, so no two
     * threads write the same y[row]). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT row = 0; row < m; row++)
        for(ALPHA_INT i = 0; i < num_threads; i++)
            alpha_adde(y[row], y_local[i][row]);

    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        alpha_free(y_local[i]);
    }
    alpha_free(y_local);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSR *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    /* Public entry point: dispatch directly to the OpenMP implementation
     * (a serial variant exists elsewhere in the project). */
    const alphasparse_status_t status =
        trmv_x_csr_n_lo_conj_omp(alpha, A, x, beta, y);
    return status;
}
|
nqueens.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/*
* Original code from the Cilk project (by Keith Randall)
*
* Copyright (c) 2000 Massachusetts Institute of Technology
* Copyright (c) 2000 Matteo Frigo
*/
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
#include <alloca.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <omp.h>
#include "BenchmarksUtil.h"
/* Checking information */
static int solutions[] = {
1,
0,
0,
2,
10, /* 5 */
4,
40,
92,
352,
724, /* 10 */
2680,
14200,
73712,
365596,
};
#define MAX_SOLUTIONS sizeof(solutions)/sizeof(int)
//#ifdef FORCE_TIED_TASKS
//int mycount=0;
//#pragma omp threadprivate(mycount)
int total_count, total_count_seq;
int if_cutoff, final_cutoff, manual_cutoff;
int cutoff_value =3;
/*
* <a> contains array of <n> queen positions. Returns 1
* if none of the queens conflict, and returns 0 otherwise.
*/
/*
 * Board-consistency predicate.  <a> holds the column chosen for the queen
 * in each of the first <n> rows (rows are distinct by construction).
 * Returns 1 when no pair of queens shares a column or a diagonal,
 * 0 otherwise.
 */
int ok(int n, char *a)
{
    int row, other;
    for (row = 0; row < n; row++) {
        const char col = a[row];
        for (other = row + 1; other < n; other++) {
            const char ocol = a[other];
            const int dist = other - row;
            /* same column, or same anti-/main diagonal */
            if (ocol == col || ocol == col - dist || ocol == col + dist)
                return 0;
        }
    }
    return 1;
}
/*
 * Sequential n-queens count.  Rows [0, j) of <a> already hold a consistent
 * partial placement; on return *solutions is the number of complete boards
 * reachable from it.  <a> is reused as scratch from row j onward.
 */
void nqueens_ser (int n, int j, char *a, int *solutions)
{
    int total = 0;
    int col;

    if (j == n) {
        /* all rows placed: exactly one solution in this subtree */
        *solutions = 1;
        return;
    }
    /* try each possible column for the queen in row <j> */
    for (col = 0; col < n; col++) {
        int below;
        a[j] = (char) col;
        if (!ok(j + 1, a))
            continue;
        nqueens_ser(n, j + 1, a, &below);
        total += below;
    }
    *solutions = total;
}
void nqueens_if(int n, int j, char *a, int *solutions, int depth)
{
    /* Task-parallel n-queens using the OpenMP if() clause as the cutoff:
     * while depth < cutoff_value tasks are deferred normally; beyond the
     * cutoff if(false) makes each task execute immediately in the
     * encountering thread.  Must run inside a parallel region (see
     * find_queens). */
    int *csols;
    int i;
    if (n == j) {
        /* good solution, count it */
        *solutions = 1;
        return;
    }
    *solutions = 0;
    /* per-branch partial counts; alloca keeps them on this task's stack,
     * which stays live across the taskwait below */
    csols = alloca(n*sizeof(int));
    memset(csols,0,n*sizeof(int));
    /* try each possible position for queen <j> */
    for (i = 0; i < n; i++) {
#pragma omp task untied if(depth < cutoff_value)
        {
            /* allocate a temporary array and copy <a> into it, so sibling
             * tasks never share the mutable board */
            char * b = alloca(n * sizeof(char));
            memcpy(b, a, j * sizeof(char));
            b[j] = (char) i;
            if (ok(j + 1, b))
                nqueens_if(n, j + 1, b,&csols[i],depth+1);
        }
    }
#pragma omp taskwait
    /* all child tasks complete: fold their counts into the caller's slot */
    for ( i = 0; i < n; i++) *solutions += csols[i];
}
void nqueens_final(int n, int j, char *a, int *solutions, int depth)
{
    /* Task-parallel n-queens using final()/mergeable as the cutoff: once
     * depth+1 >= cutoff_value every descendant task is "final" and runs
     * inline; deep inside a final subtree the tasks reuse the parent's
     * board and counter directly instead of allocating copies. */
    int *csols;
    int i;
    if (n == j) {
        /* good solution, count it; += (not =) because final subtrees
         * accumulate into a shared parent counter */
        *solutions += 1;
        return;
    }
    char final = omp_in_final();
    if ( !final ) {
        /* normal (non-final) task: set up private per-branch counters */
        *solutions = 0;
        csols = alloca(n*sizeof(int));
        memset(csols,0,n*sizeof(int));
    }
    /* try each possible position for queen <j> */
    for (i = 0; i < n; i++) {
#pragma omp task untied final(depth+1 >= cutoff_value) mergeable
        {
            char *b;
            int *sol;
            if ( omp_in_final() && depth+1 > cutoff_value ) {
                /* strictly inside a final subtree: no sibling runs
                 * concurrently, so the parent's board and counter can be
                 * mutated in place */
                b = a;
                sol = solutions;
            } else {
                /* allocate a temporary array and copy <a> into it */
                b = alloca(n * sizeof(char));
                memcpy(b, a, j * sizeof(char));
                sol = &csols[i];
            }
            b[j] = i;
            if (ok(j + 1, b))
                nqueens_final(n, j + 1, b,sol,depth+1);
        }
    }
#pragma omp taskwait
    if ( !final ) {
        /* only non-final frames own csols and must reduce it */
        for ( i = 0; i < n; i++) *solutions += csols[i];
    }
}
void nqueens_manual(int n, int j, char *a, int *solutions, int depth)
{
    /* Task-parallel n-queens with a manual cutoff: spawn tasks only while
     * depth < cutoff_value, otherwise fall back to the sequential solver
     * directly on the parent's board (no task, no copy). */
    int *csols;
    int i;
    if (n == j) {
        /* good solution, count it */
        *solutions = 1;
        return;
    }
    *solutions = 0;
    /* per-branch partial counts, live across the taskwait below */
    csols = alloca(n*sizeof(int));
    memset(csols,0,n*sizeof(int));
    /* try each possible position for queen <j> */
    for (i = 0; i < n; i++) {
        if ( depth < cutoff_value ) {
#pragma omp task untied
            {
                /* allocate a temporary array and copy <a> into it */
                char * b = alloca(n * sizeof(char));
                memcpy(b, a, j * sizeof(char));
                b[j] = (char) i;
                if (ok(j + 1, b))
                    nqueens_manual(n, j + 1, b,&csols[i],depth+1);
            }
        } else {
            /* past the cutoff this branch runs sequentially inside the
             * current task, so reusing <a> is safe */
            a[j] = (char) i;
            if (ok(j + 1, a))
                nqueens_ser(n, j + 1, a,&csols[i]);
        }
    }
#pragma omp taskwait
    for ( i = 0; i < n; i++) *solutions += csols[i];
}
void nqueens(int n, int j, char *a, int *solutions, int depth)
{
    /* Fully task-parallel n-queens: one task per candidate column with no
     * cutoff at all.  NOTE(review): <depth> is passed through unchanged
     * (see the FIXME below), so it never grows and is effectively unused
     * in this variant. */
    int *csols;
    int i;
    if (n == j) {
        /* good solution, count it */
        *solutions = 1;
        return;
    }
    *solutions = 0;
    /* per-branch partial counts, live across the taskwait below */
    csols = alloca(n*sizeof(int));
    memset(csols,0,n*sizeof(int));
    /* try each possible position for queen <j> */
    for (i = 0; i < n; i++) {
#pragma omp task untied
        {
            /* allocate a temporary array and copy <a> into it */
            char * b = alloca(n * sizeof(char));
            memcpy(b, a, j * sizeof(char));
            b[j] = (char) i;
            if (ok(j + 1, b))
                nqueens(n, j + 1, b,&csols[i],depth); //FIXME: depth or depth+1 ???
        }
    }
#pragma omp taskwait
    for ( i = 0; i < n; i++) *solutions += csols[i];
}
/* Run the sequential solver for a <size>x<size> board, storing the result
 * in the global total_count_seq (used later by verify_queens). */
void find_queens_seq (int size)
{
    char *board = alloca(size * sizeof(char));

    total_count_seq = 0;
    fprintf(stdout,"Computing N-Queens algorithm (n=%d) ", size);
    nqueens_ser(size, 0, board, &total_count_seq);
    fprintf(stdout," completed!\n");
}
void find_queens (int size)
{
total_count=0;
fprintf(stdout,"Computing N-Queens algorithm (n=%d) ", size);
if (if_cutoff) {
#pragma omp parallel
{
#pragma omp single
{
char *a;
a = alloca(size * sizeof(char));
nqueens_if(size, 0, a, &total_count,0);
}
}
}
else if (manual_cutoff) {
#pragma omp parallel
{
#pragma omp single
{
char *a;
a = alloca(size * sizeof(char));
nqueens_manual(size, 0, a, &total_count,0);
}
}
}
else if (final_cutoff) {
#pragma omp parallel
{
#pragma omp single
{
char *a;
a = alloca(size * sizeof(char));
nqueens_final(size, 0, a, &total_count,0);
}
}
}
else {
#pragma omp parallel
{
#pragma omp single
{
char *a;
a = alloca(size * sizeof(char));
nqueens(size, 0, a, &total_count,0);
}
}
}
fprintf(stdout," completed!\n");
}
/*
 * Validate the parallel result against the solution table and the
 * sequential run.  Returns -1 when no reference value exists for this
 * board size, 1 on a match, 0 on a mismatch.
 */
int verify_queens (int size)
{
 /* Guard both ends of the table.  MAX_SOLUTIONS expands to a size_t, so
  * the old `size > MAX_SOLUTIONS` test promoted a non-positive size to a
  * huge unsigned value, passed the guard, and read solutions[size-1] out
  * of bounds. */
 if ( size < 1 || (size_t) size > MAX_SOLUTIONS ) return -1;
 if ( total_count == solutions[size-1] && total_count == total_count_seq) return 1;
 return 0;
}
/* Print command-line help for the benchmark to stderr. */
void print_usage() {
    fprintf(stderr, "\nUsage: %s -[options]\n\n", "N-Queens");
    fprintf(stderr,
            "Where options are:\n"
            "  -n <number> : Board size\n"
            "  -a <flag> : Set if-cutoff on\n"
            "  -b <flag> : Set manual-cutoff on\n"
            "  -c <flag> : Set final-cutoff on (choose one or none)\n"
            "  -h : Print program's usage (this help).\n"
            "\n");
}
/*
 * Entry point: parse flags (-n size, -a/-b/-c cutoff variant, -h help),
 * time the parallel and the sequential solver, and compare the results.
 */
int main(int argc, char* argv[]) {
    int size = 14, i;
    /* each consumed flag has its option letter overwritten with '*' */
    for (i=1; i<argc; i++) {
        if (argv[i][0] == '-') {
            switch (argv[i][1]) {
            case 'n': /* board size */
                argv[i][1] = '*';
                i++;
                /* BUG FIX: the missing-argument check was a bare string
                 * literal statement (`"Erro\n";`) — a no-op that printed
                 * nothing before exiting */
                if (argc == i) { fprintf(stderr, "Erro\n"); exit(100); }
                size = atoi(argv[i]);
                break;
            case 'a': /* select the if()-clause cutoff variant */
                argv[i][1] = '*';
                if_cutoff = 1;
                manual_cutoff = 0;
                final_cutoff = 0;
                break;
            case 'b': /* select the manual cutoff variant */
                argv[i][1] = '*';
                manual_cutoff = 1;
                if_cutoff = 0;
                final_cutoff = 0;
                break;
            case 'c': /* select the final()-clause cutoff variant */
                argv[i][1] = '*';
                final_cutoff = 1;
                if_cutoff = 0;
                manual_cutoff = 0;
                break;
            case 'h': /* print usage */
                argv[i][1] = '*';
                print_usage();
                exit (100);
                break;
            }
        }
    }
    double t_start, t_end;
    t_start = rtclock();
    find_queens(size);
    t_end = rtclock();
    fprintf(stdout, "Parallel Runtime: %0.6lfs\n", t_end - t_start);
    t_start = rtclock();
    find_queens_seq(size);
    t_end = rtclock();
    fprintf(stdout, "Sequential Runtime: %0.6lfs\n", t_end - t_start);
    if (verify_queens(size) == 1) {
        fprintf(stdout, "Result: Successful\n");
    } else {
        fprintf(stdout, "Result: Unsuccessful\n");
    }
    return 0;
}
axpy_int.c | //axpy.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 1200
// read timer in second
// read timer in second (wall clock).  Uses C11 timespec_get instead of the
// obsolete ftime()/<sys/timeb.h> pair; resolution improves from
// milliseconds to whatever the platform clock provides.
double read_timer() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double) ts.tv_sec + (double) ts.tv_nsec / 1e9;
}
//Create a matrix and a vector and fill with random numbers
void init(int *X, int *Y) {
for (int i = 0; i<N; i++) {
X[i] = (int)rand()/(int)(RAND_MAX/10.0);
Y[i] = (int)rand()/(int)(RAND_MAX/10.0);
}
}
//Our sum function- what it does is pretty straight-forward.
void axpy(int *X, int *Y, int a) {
#pragma omp simd
for (int i = 0; i<N; i++) {
Y[i] += a * X[i];
}
}
// Debug functions
// Reference scalar implementation of Y += a * X, used for validation.
void axpy_serial(int *X, int *Y, int a) {
    for (int idx = 0; idx < N; idx++)
        Y[idx] += a * X[idx];
}
// Print the first 8 entries of a vector as "[v0 v1 ... v7 ]".
void print_vector(int *vector) {
    int i = 0;
    printf("[");
    while (i < 8) {
        printf("%d ", vector[i]);
        ++i;
    }
    puts("]");
}
// Compare two vectors element-wise; returns 0 iff they are identical.
// BUG FIX: the old version summed *signed* differences, so errors of
// opposite sign could cancel and report a spurious 0; accumulate the
// absolute difference instead.
int check(int *A, int *B){
    int difference = 0;
    for(int i = 0;i<N; i++){
        difference += abs(A[i]- B[i]);
    }
    return difference;
}
// Driver: initialise data, run SIMD and serial AXPY N_RUNS times each,
// and report runtime, GFLOPS and a correctness check.
int main(int argc, char **argv) {
    //Set everything up
    int *X = malloc(sizeof(int)*N);
    int *Y = malloc(sizeof(int)*N);
    int *Y_serial = malloc(sizeof(int)*N);
    if (X == NULL || Y == NULL || Y_serial == NULL) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    int a = 3;
    srand(time(NULL));
    init(X, Y);
    for (int i = 0; i<N; i++) Y_serial[i] = Y[i];
    print_vector(Y);
    print_vector(X);
    printf("%d\n", a);
    puts("=\n");
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy(X, Y, a);
    double t = (read_timer() - start);
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy_serial(X, Y_serial, a);
    double t_serial = (read_timer() - start_serial);
    print_vector(Y);
    puts("---------------------------------");
    print_vector(Y_serial);
    // BUG FIX: AXPY performs 2*N flops per run (one multiply, one add per
    // element); the old formula ((2.0*N)*N*N_RUNS) had an extra factor N.
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("AXPY (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("AXPY (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %d\n", check(Y,Y_serial));
    free(X);
    free(Y);
    free(Y_serial);
    return 0;
}
|
hermv_c_bsr_n_lo_trans.c | #include<string.h>
#ifdef _OPENMP
#include<omp.h>
#endif
#include"alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_BSR *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    /*
     * y := alpha * op(A) * x + beta * y for a square BSR matrix, reading
     * only the blocks at or below the block diagonal; each strictly-lower
     * entry is also scattered to its mirrored position (alpha_madde_2c
     * presumably applies the conjugated value — NOTE(review): confirm
     * against the macro definition).
     */
    const ALPHA_INT m = A->rows * A->block_size;
    const ALPHA_INT n = A->cols * A->block_size;
    const ALPHA_INT bs = A->block_size;
    const ALPHA_INT bs2 = bs * bs;
    // assert(m==n);
    ALPHA_INT b_rows = A->rows;
    ALPHA_INT b_cols = A->cols;
    if (b_rows != b_cols)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    ALPHA_INT thread_num = alpha_get_thread_num();
    /* nnz-balanced contiguous block-row range per thread */
    ALPHA_INT partition[thread_num + 1];
    balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);
    /* per-thread accumulators: the mirrored updates scatter into arbitrary
     * rows, so threads cannot write y directly */
    ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_m_s = partition[tid];
        const ALPHA_INT local_m_e = partition[tid + 1];
        tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * b_rows * bs);
        memset(tmp[tid], 0, sizeof(ALPHA_Number) * b_rows * bs);
        if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
        {
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    // diagonal block containing diagonal entry
                    if (bc == br)
                    {
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            //dignaol entry A(row+b_row,col+b_col)
                            alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * (bs + 1)], x[col + b_row]);
                            for (ALPHA_INT b_col = 0; b_col < b_row; b_col++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
        else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
        {
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    // diagonal block containing diagonal entry
                    if (bc == br)
                    {
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            alpha_madde(tmp[tid][b_col + row], A->values[a0_idx + b_col * (bs + 1)], x[b_col + col]);
                            for (ALPHA_INT b_row = b_col + 1; b_row < bs; b_row++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
    }
    /* BUG FIX: the closing brace above was missing, so everything below
     * executed inside the parallel region: y was scaled by beta and the
     * partial sums were added once per thread, tmp[i] was freed by every
     * thread (double free), and the return branched out of a structured
     * block. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < b_cols * bs; ++i)
    {
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(tmp_y, tmp_y, tmp[j][i]);
        }
        alpha_mul(y[i], y[i], beta);
        alpha_madde(y[i], tmp_y, alpha);
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        free(tmp[i]);
    }
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
omp_ex_25.c | #include <stdio.h>
#include <omp.h>
/*
MIT License
Copyright (c) 2019 NOUREDDINE DAGHBOUDJ
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/* Demonstrates the OpenMP lastprivate clause: after the worksharing loop,
 * <a> holds the value written by the sequentially-last iteration. */
int main()
{
    unsigned a = 90;

    printf("Before a = %i\n", a);
    printf("Inside a = ");
#pragma omp parallel for lastprivate(a)
    for(unsigned int iter = 0; iter < 16; iter++)
    {
        a = iter + 10;
        printf("%i ", a);
    }
    printf("\n");
    printf("After a = %i\n", a);
    return 0;
}
|
flat_l1.c | /*******************************************************************************
* Copyright 2019 UChicago Argonne, LLC.
* (c.f. AUTHORS, LICENSE)
*
* This file is part of the AML project.
* For more info, see https://github.com/anlsys/aml
*
* SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/
/*
* This is a benchmark for the BLAS Level 1 operations for AML.
*/
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include "aml.h"
#include "aml/area/linux.h"
#include "blas/l1_kernel.h"
#include "blas/verify_l1.h"
#include "utils.h"
/* Look into another way to define these parameters */
#define DEFAULT_ARRAY_SIZE (1UL << 15)
#ifdef NTIMES
#if NTIMES <= 1
#define NTIMES 10
#endif
#endif
#ifndef NTIMES
#define NTIMES 10
#endif
#define OFFSET 0
#ifndef MIN
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#endif
#ifndef MAX
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#endif
static double *a, *b, *c;
typedef double (*r)(size_t, double *, double *, double *, double);
r run_f[8] = {&dcopy, &dscal, &daxpy, &dasum, &ddot, &dnrm2, &dswap, &idmax};
v verify_f[8] = {&verify_dcopy, &verify_dscal, &verify_daxpy, &verify_dasum,
&verify_ddot, &verify_dnrm2, &verify_dswap, &verify_idmax};
/*
 * Benchmark driver: run each BLAS-1 kernel nb_reps times on arrays of
 * memsize doubles allocated through an AML area, tracking per-kernel
 * min/avg/max latency.
 *
 * Usage: flat_l1 [log2_size [nb_reps]]; with no arguments the defaults
 * DEFAULT_ARRAY_SIZE / NTIMES are used.
 */
int main(int argc, char *argv[])
{
	aml_init(&argc, &argv);
	size_t i, j, k;
	size_t nb_reps;
	size_t memsize;
	double dscalar;
	long long int timing;
	long long int sumtime[10] = {0}, maxtime[10] = {0},
		      mintime[10] = {LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX,
				     LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX,
				     LONG_MAX, LONG_MAX};
	char *label[10] = {
	        "Copy: ", "Scale: ", "Triad: ", "Asum: ",
	        "Dot: ", "Norm: ", "Swap: ", "Max ID: ",
	        "RotP: ", "RotM: "};
	if (argc == 1) {
		memsize = DEFAULT_ARRAY_SIZE;
		nb_reps = NTIMES;
	} else if (argc == 2) {
		/* BUG FIX: the old code asserted argc == 2 and then read
		 * argv[2], which is NULL in that case */
		memsize = 1UL << atoi(argv[1]);
		nb_reps = NTIMES;
	} else {
		assert(argc == 3);
		memsize = 1UL << atoi(argv[1]);
		nb_reps = atoi(argv[2]);
	}
	printf("Each kernel will be executed %ld times.\n", nb_reps);
#pragma omp parallel
	{
#pragma omp master
		{
			k = omp_get_num_threads();
			printf("Number of threads required = %li\n", k);
		}
	}
	/* count the threads that actually run, as a sanity check */
	k = 0;
#pragma omp parallel
#pragma omp atomic
	k++;
	printf("Number of threads counted = %li\n", k);
	size_t size = sizeof(double) * (memsize + OFFSET);
	// AML code
	struct aml_area *area = &aml_area_linux;
	a = aml_area_mmap(area, size, NULL);
	b = aml_area_mmap(area, size, NULL);
	c = aml_area_mmap(area, size, NULL);
	/* MAIN LOOP - repeat test cases nb_reps */
	dscalar = 3.0;
	double x = 1.0, y = 2.0;
	double param[5];
	param[0] = -1.0;
	for (k = 1; k < 5; k++)
		param[k] = k;
	double res;
	aml_time_t start, end;
	for (k = 0; k < nb_reps; k++) {
		/* the 8 single-result kernels, via the function table */
		for (i = 0; i < 8; i++) {
			init_arrays(memsize, a, b, c);
			aml_gettime(&start);
			res = run_f[i](memsize, a, b, c, dscalar);
			aml_gettime(&end);
			timing = aml_timediff(start, end);
			verify_f[i](memsize, a, b, c, dscalar, res);
			sumtime[i] += timing;
			mintime[i] = MIN(mintime[i], timing);
			maxtime[i] = MAX(maxtime[i], timing);
		}
		// Rotations
		init_arrays(memsize, a, b, c);
		aml_gettime(&start);
		drot(memsize, a, b, x, y);
		aml_gettime(&end);
		timing = aml_timediff(start, end);
		verify_drot(memsize, a, b, c, x, y, res);
		/* BUG FIX: the min/max updates for slots 8 and 9 read
		 * mintime[i]/maxtime[i] with the stale loop index i (left at
		 * 8), so RotM statistics clobbered RotP's slot */
		sumtime[8] += timing;
		mintime[8] = MIN(mintime[8], timing);
		maxtime[8] = MAX(maxtime[8], timing);
		init_arrays(memsize, a, b, c);
		aml_gettime(&start);
		drotm(memsize, a, b, param);
		aml_gettime(&end);
		timing = aml_timediff(start, end);
		verify_drotm(memsize, a, b, c, x, y, res);
		sumtime[9] += timing;
		mintime[9] = MIN(mintime[9], timing);
		maxtime[9] = MAX(maxtime[9], timing);
		/* Add the rotation generations later, + 2 functions
		drotg(x, y, dc, ds);
		drotmg(d1, d2, x, y, param);
		*/
	}
	/* SUMMARY */
	printf("Function Avg time Min time Max time\n");
	for (j = 0; j < 10; j++) {
		/* BUG FIX: sumtime accumulates all nb_reps iterations, so the
		 * average divides by nb_reps (the old nb_reps - 1 divisor
		 * followed STREAM's skip-first convention without skipping) */
		double avg = (double)sumtime[j] / (double)nb_reps;
		printf("%s\t%11.6f\t%lld\t%lld\n", label[j], avg, mintime[j],
		       maxtime[j]);
	}
	/* aml specific code */
	aml_area_munmap(area, a, size);
	aml_area_munmap(area, b, size);
	aml_area_munmap(area, c, size);
	aml_finalize();
	return 0;
}
|
task_array2.c | #include <omp.h>
typedef int int_arr[1];
/* Regression test: firstprivate copy of an array-of-array typedef used
 * inside a task. */
int main() {
    int_arr arr[1];
#pragma omp task firstprivate(arr)
    {
        arr[0][0] = 0;
    }
    return 0;
}
|
threading.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifndef LIGHTGBM_UTILS_THREADING_H_
#define LIGHTGBM_UTILS_THREADING_H_
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <algorithm>
#include <functional>
#include <vector>
namespace LightGBM
{
/*!
 * Static helpers for splitting an index range into per-thread blocks and
 * running a callback over the resulting blocks in parallel.
 */
class Threading
{
public:
  /*! Compute block count/size using the current OpenMP thread count. */
  template <typename INDEX_T>
  static inline void BlockInfo(INDEX_T cnt, INDEX_T min_cnt_per_block,
                               int *out_nblock, INDEX_T *block_size)
  {
    BlockInfo<INDEX_T>(OMP_NUM_THREADS(), cnt, min_cnt_per_block, out_nblock,
                       block_size);
  }
  /*! Split cnt items into at most num_threads blocks holding at least
   *  min_cnt_per_block items each; block_size is rounded via SIZE_ALIGNED. */
  template <typename INDEX_T>
  static inline void BlockInfo(int num_threads, INDEX_T cnt,
                               INDEX_T min_cnt_per_block, int *out_nblock,
                               INDEX_T *block_size)
  {
    const int max_blocks =
        static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block);
    *out_nblock = std::min<int>(num_threads, max_blocks);
    if (*out_nblock <= 1)
    {
      *block_size = cnt;
    }
    else
    {
      *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock));
    }
  }
  /*! Like BlockInfo, but forces block_size up to a multiple of
   *  min_cnt_per_block instead of SIZE_ALIGNED rounding. */
  template <typename INDEX_T>
  static inline void BlockInfoForceSize(int num_threads, INDEX_T cnt,
                                        INDEX_T min_cnt_per_block,
                                        int *out_nblock, INDEX_T *block_size)
  {
    const int max_blocks =
        static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block);
    *out_nblock = std::min<int>(num_threads, max_blocks);
    if (*out_nblock <= 1)
    {
      *block_size = cnt;
    }
    else
    {
      INDEX_T size = (cnt + (*out_nblock) - 1) / (*out_nblock);
      // force the block size to a multiple of min_cnt_per_block
      size = (size + min_cnt_per_block - 1) / min_cnt_per_block *
             min_cnt_per_block;
      *block_size = size;
    }
  }
  /*! Run inner_fun(block_index, begin, end) over [start, end) split into
   *  blocks of at least min_block_size; returns the block count used. */
  template <typename INDEX_T>
  static inline int For(
      INDEX_T start, INDEX_T end, INDEX_T min_block_size,
      const std::function<void(int, INDEX_T, INDEX_T)> &inner_fun)
  {
    int n_block = 1;
    INDEX_T block_len = end - start;
    BlockInfo<INDEX_T>(end - start, min_block_size, &n_block, &block_len);
    OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1)
    for (int blk = 0; blk < n_block; ++blk)
    {
      OMP_LOOP_EX_BEGIN();
      const INDEX_T lo = start + block_len * blk;
      const INDEX_T hi = std::min(end, lo + block_len);
      inner_fun(blk, lo, hi);
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    return n_block;
  }
};
/*!
 * \brief Partitions indices [0, cnt) into "left" and "right" subsets in
 *        parallel.
 *
 * Each thread partitions its own contiguous chunk through a user-supplied
 * functor; the per-chunk results are then concatenated into the output with
 * all left indices first.  With TWO_BUFFER == false a single scratch buffer
 * is used and each chunk's right part is stored reversed in place.
 */
template <typename INDEX_T, bool TWO_BUFFER>
class ParallelPartitionRunner
{
public:
  ParallelPartitionRunner(INDEX_T num_data, INDEX_T min_block_size)
      : min_block_size_(min_block_size)
  {
    num_threads_ = OMP_NUM_THREADS();
    left_.resize(num_data);
    if (TWO_BUFFER)
    {
      right_.resize(num_data);
    }
    offsets_.resize(num_threads_);
    left_cnts_.resize(num_threads_);
    right_cnts_.resize(num_threads_);
    left_write_pos_.resize(num_threads_);
    right_write_pos_.resize(num_threads_);
  }
  ~ParallelPartitionRunner() {}
  /*! \brief Resize the scratch buffers for a new data size. */
  void ReSize(INDEX_T num_data)
  {
    left_.resize(num_data);
    if (TWO_BUFFER)
    {
      right_.resize(num_data);
    }
  }
  /*!
   * \brief Run the partition.
   * \param cnt  number of indices to partition
   * \param func chunk partitioner (block, start, cnt, left_buf, right_buf)
   *             returning the number of left elements it wrote
   * \param out  output buffer with room for cnt entries
   * \return     total left count; left indices occupy out[0..ret)
   */
  template <bool FORCE_SIZE>
  INDEX_T Run(
      INDEX_T cnt,
      const std::function<INDEX_T(int, INDEX_T, INDEX_T, INDEX_T *, INDEX_T *)> &func,
      INDEX_T *out)
  {
    int nblock = 1;
    INDEX_T inner_size = cnt;
    if (FORCE_SIZE)
    {
      Threading::BlockInfoForceSize<INDEX_T>(num_threads_, cnt, min_block_size_,
                                             &nblock, &inner_size);
    }
    else
    {
      Threading::BlockInfo<INDEX_T>(num_threads_, cnt, min_block_size_, &nblock,
                                    &inner_size);
    }
    OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1) num_threads(num_threads_)
    for (int i = 0; i < nblock; ++i)
    {
      OMP_LOOP_EX_BEGIN();
      INDEX_T cur_start = i * inner_size;
      INDEX_T cur_cnt = std::min(inner_size, cnt - cur_start);
      offsets_[i] = cur_start;
      if (cur_cnt <= 0)
      {
        left_cnts_[i] = 0;
        right_cnts_[i] = 0;
        continue;
      }
      auto left_ptr = left_.data() + cur_start;
      INDEX_T *right_ptr = nullptr;
      if (TWO_BUFFER)
      {
        right_ptr = right_.data() + cur_start;
      }
      // split data inner, reduce the times of function called
      INDEX_T cur_left_count =
          func(i, cur_start, cur_cnt, left_ptr, right_ptr);
      if (!TWO_BUFFER)
      {
        // reverse for one buffer
        std::reverse(left_ptr + cur_left_count, left_ptr + cur_cnt);
      }
      left_cnts_[i] = cur_left_count;
      right_cnts_[i] = cur_cnt - cur_left_count;
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    /* prefix sums give each block's write position in the output */
    left_write_pos_[0] = 0;
    right_write_pos_[0] = 0;
    for (int i = 1; i < nblock; ++i)
    {
      left_write_pos_[i] = left_write_pos_[i - 1] + left_cnts_[i - 1];
      right_write_pos_[i] = right_write_pos_[i - 1] + right_cnts_[i - 1];
    }
    // BUG FIX: was data_size_t, which silently narrows the count when
    // INDEX_T is a wider type than data_size_t
    INDEX_T left_cnt = left_write_pos_[nblock - 1] + left_cnts_[nblock - 1];
    auto right_start = out + left_cnt;
#pragma omp parallel for schedule(static, 1) num_threads(num_threads_)
    for (int i = 0; i < nblock; ++i)
    {
      std::copy_n(left_.data() + offsets_[i], left_cnts_[i],
                  out + left_write_pos_[i]);
      if (TWO_BUFFER)
      {
        std::copy_n(right_.data() + offsets_[i], right_cnts_[i],
                    right_start + right_write_pos_[i]);
      }
      else
      {
        std::copy_n(left_.data() + offsets_[i] + left_cnts_[i], right_cnts_[i],
                    right_start + right_write_pos_[i]);
      }
    }
    return left_cnt;
  }
private:
  int num_threads_;
  INDEX_T min_block_size_;
  std::vector<INDEX_T, mi_stl_allocator<INDEX_T>> left_;
  std::vector<INDEX_T, mi_stl_allocator<INDEX_T>> right_;
  std::vector<INDEX_T, mi_stl_allocator<INDEX_T>> offsets_;
  std::vector<INDEX_T, mi_stl_allocator<INDEX_T>> left_cnts_;
  std::vector<INDEX_T, mi_stl_allocator<INDEX_T>> right_cnts_;
  std::vector<INDEX_T, mi_stl_allocator<INDEX_T>> left_write_pos_;
  std::vector<INDEX_T, mi_stl_allocator<INDEX_T>> right_write_pos_;
};
} // namespace LightGBM
#endif // LightGBM_UTILS_THREADING_H_
|
Jacobi1D-DiamondByHand-OMP_static.test.c | /******************************************************************************
* Jacobi1D benchmark
* Tiled using diamond slabs coded by hand
*
* Usage:
* make omp
* export OMP_NUM_THREADS=8
* bin/Jacobi1D-DiamondSlabByHand-OMP \
* `cat src/Jacobi1D-DiamondSlabByHand-OMP.perfexecopts`
* For a run on 8 threads
******************************************************************************/
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <ctype.h>
#include <stdbool.h>
#include <assert.h>
#include "util.h"
#define STENCIL(read,write,x) space[write][x] = (space[read][x-1] +\
space[read][x] +\
space[read][x+1])/3;
/*
 * Number of tile origins x0 = tiles_start, tiles_start + stride, ... that
 * fall at or below upperBound.  Computed in closed form instead of the
 * original O(count) loop; a non-positive stride — which previously looped
 * forever when tiles_start <= upperBound — now yields 0.
 */
int countTiles( int tiles_start, int upperBound, int stride ){
  if( stride <= 0 || tiles_start > upperBound ){
    return 0;
  }
  return ( upperBound - tiles_start ) / stride + 1;
}
// main
// The steps taken in this code are the following:
// 1 - command line parsing
// 2 - data allocation and initialization
// 3 - jacobi 1D timed within tiling loop
// 4 - output and optional verification
//
int main( int argc, char* argv[] ) {
  // rather than calling fflush
  setbuf(stdout, NULL);
  // 1 - command line parsing
  Params cmdLineArgs;
  parseCmdLineArgs(&cmdLineArgs,argc,argv);
  // 2 - data allocation and initialization
  // variables required for Jacobi
  int lowerBound = 1;
  int upperBound = lowerBound + cmdLineArgs.problemSize - 1;
  // variables required for tiling
  // width_min is the narrow end of the diamond, derived from width_max and
  // the time-band height so that the A and B tile families interlock
  int width_min = (cmdLineArgs.width_max + -1 * cmdLineArgs.timeBand) -
                  (0 + 1 * cmdLineArgs.timeBand) +1;
  // starting point for doing 'A' tiles loops
  int tiles_A_start = lowerBound - cmdLineArgs.timeBand + 1;
  // starting point for doing 'B' tiles loop
  int tiles_B_start = tiles_A_start + cmdLineArgs.width_max;
  // width between the first x0 point and next x0 point
  int betweenTiles = width_min + cmdLineArgs.width_max;
  // assert that this is a valid tile
  assert( width_min >= 1 && cmdLineArgs.width_max >= width_min );
  int count_A_tiles = countTiles( tiles_A_start, upperBound, betweenTiles );
  int count_B_tiles = countTiles( tiles_B_start, upperBound, betweenTiles );
  // NOTE(review): the per-core tile counts are computed but never used below
  int A_tiles_per_core = max( 1, count_A_tiles / cmdLineArgs.cores );
  int B_tiles_per_core = max( 1, count_B_tiles / cmdLineArgs.cores );
  // allocate time-steps 0 and 1 (double buffering; +2 leaves room for the
  // halo cells at index 0 and problemSize+1)
  double* space[2] = { NULL, NULL };
  space[0] = (double*) malloc( (cmdLineArgs.problemSize + 2) * sizeof(double));
  space[1] = (double*) malloc( (cmdLineArgs.problemSize + 2) * sizeof(double));
  if( space[0] == NULL || space[1] == NULL ){
    printf( "Could not allocate space array\n" );
    exit(0);
  }
  // perform first touch in the same manner that the tile will use the data
  // (so pages land on the NUMA node of the thread that will compute them)
  int idx;
  #pragma omp parallel for private(idx) schedule(static)
  for( idx = tiles_A_start; idx <= upperBound; idx += betweenTiles ){
    if(idx >= lowerBound){
      int i;
      for (i=idx;i<(idx+betweenTiles)&&i<upperBound;i++){
        space[0][i] = 0;
        space[1][i] = 0;
      }
    }
  }
  // use global seed to seed the random number gen (will be constant)
  srand(cmdLineArgs.globalSeed);
  // seed the space.
  for( idx = lowerBound; idx <= upperBound; ++idx ){
    space[0][idx] = rand() / (double)rand();
  }
  // set halo values (sanity)
  space[0][0] = 0;
  space[0][upperBound+1] = 0;
  space[1][0] = 0;
  space[1][upperBound+1] = 0;
  int read, write;
  int tau = cmdLineArgs.tau_runtime;
  int T = cmdLineArgs.T;
  int Ui = upperBound;
  int Li = 1;
  // sentinel initial values; all four are (re)assigned by the loops below
  int thyme=-12345, k1=-12345, t=-12345, i=-12345;
  //fprintf(stderr,"tau=%d\n", tau);
  //fprintf(stderr,"%d, %d\n",floord(2, tau)-2, floord(T*2, tau));
  // 4 - run the actual test
  double start_time = omp_get_wtime();
  // wavefront loop over diamond slabs: tiles sharing a thyme value are
  // intended to be mutually independent, so the k1 loop is parallelized
  for ( thyme = floord(2, tau)-2;
        thyme <= floord(T*2, tau);
        thyme += 1){
    #pragma omp parallel for private(k1, t, write, read, i) schedule(static)
    for ( k1 = (int)(Ui*2/((double) tau)-thyme+1 )/-2;
          k1 <= (int)( (Li*2/((double) tau))-thyme-1)/ -2 ;
          k1 += 1){
      // printf("%d, %d, %d, %d\n", thyme, k1, t, i);
      // begin inner loops over points in tile
      for ( t = max(1, floord(thyme*tau - k1*tau + k1*tau + 1, 2));
            t < min(T+1, tau + floord(thyme*tau - k1*tau + k1*tau, 2));
            t += 1){
        // printf("%d, %d, %d, %d\n", thyme, k1, t, i);
        // ping-pong buffers: odd t writes space[1], even t writes space[0]
        write = t & 1;
        read = 1-write;
        //read = (t - 1) & 1;
        //write = 1 - read;
        for ( i = max(Li, max(thyme*tau - k1*tau - t, -tau - k1*tau + t + 1));
              i <=min(Ui, min(tau + thyme*tau - k1*tau - t - 1, -k1*tau + t));
              i += 1){
          //fprintf(stderr, "%02d, %02d, %d, %d\n", t,i, thyme, k1);
          //printf("%d, %d\n", t, i);
          STENCIL( read, write, i );
          // (t, i);
        } // i
      } // t
    } // k1
  }// thyme
  //STENCIL( read, write, idx ); // stencil computation
  double end_time = omp_get_wtime();
  double time = (end_time - start_time);
  // 4 - output and optional verification
  /*
  printf( "p: %d, T: %d, c: %d",cmdLineArgs.problemSize,cmdLineArgs.T,
  cmdLineArgs.cores);
  */
  if( cmdLineArgs.printtime ){
    printf( "Time: %f", time );
  }
  if( cmdLineArgs.verify ){
    if(!verifyResultJacobi1D(space[cmdLineArgs.T & 1],cmdLineArgs.problemSize,
                             cmdLineArgs.globalSeed,cmdLineArgs.T )){
      fprintf(stderr,"FAILURE\n");
    }else{
      fprintf(stderr,"SUCCESS\n");
    }
  }
  // NOTE(review): returns 1 unconditionally — shells treat nonzero exit as
  // failure; confirm whether the harness relies on this
  return 1;
}
|
compression_omp.c | #include <stdlib.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <omp.h>
#include "image_io.h"
#include "compression.h"
void initialise_centers(byte_t *data, double *centers, int n_pixels, int n_channels, int n_clusters);
void assign_pixels(byte_t *data, double *centers, int *labels, double *distances, int *changed, int n_pixels, int n_channels, int n_clusters);
void update_centers(byte_t *data, double *centers, int *labels, double *distances, int n_pixels, int n_channels, int n_clusters);
void update_data(byte_t *data, double *centers, int *labels, int n_pixels, int n_channels);
/*
 * Compress an image in place with k-means colour quantisation (OpenMP).
 *
 * data:           pixel buffer, n_channels bytes per pixel (modified in place)
 * width/height:   image dimensions
 * n_clusters:     number of colours in the output palette
 * max_iterations: upper bound on Lloyd iterations (may stop earlier)
 * n_threads:      OpenMP thread count used by the helper kernels
 */
void kmeans_compression_omp(byte_t *data, int width, int height, int n_channels, int n_clusters, int max_iterations, int n_threads)
{
    int n_pixels = width * height;
    int *labels = malloc(n_pixels * sizeof(int));
    double *centers = malloc(n_clusters * n_channels * sizeof(double));
    double *distances = malloc(n_pixels * sizeof(double));

    /* Fix: labels was read by assign_pixels() before ever being written
     * (uninitialised read).  Start every pixel in an invalid cluster (-1)
     * so the first assignment always counts as a change. */
    for (int i = 0; i < n_pixels; i++) {
        labels[i] = -1;
    }

    omp_set_num_threads(n_threads);

    /* per-phase timing accumulators (kept for profiling) */
    double initialise_centers_time = 0;
    double assign_pixels_time = 0;
    double update_centers_time = 0;
    double update_data_time = 0;

    double start_time = omp_get_wtime();
    initialise_centers(data, centers, n_pixels, n_channels, n_clusters);
    initialise_centers_time += omp_get_wtime() - start_time;

    int have_clusters_changed = 0;
    for (int i = 0; i < max_iterations; i++) {
        start_time = omp_get_wtime();
        assign_pixels(data, centers, labels, distances, &have_clusters_changed, n_pixels, n_channels, n_clusters);
        assign_pixels_time += omp_get_wtime() - start_time;

        /* if clusters haven't changed, they won't change in the next
         * iteration either, so stop early */
        if (!have_clusters_changed) {
            break;
        }

        start_time = omp_get_wtime();
        update_centers(data, centers, labels, distances, n_pixels, n_channels, n_clusters);
        update_centers_time += omp_get_wtime() - start_time;
    }

    start_time = omp_get_wtime();
    update_data(data, centers, labels, n_pixels, n_channels);
    update_data_time += omp_get_wtime() - start_time;

    free(centers);
    free(labels);
    free(distances);
}
/* Seed the palette: each cluster center is a copy of the channel values
 * of a uniformly random pixel (uses rand(), so seeding is the caller's
 * responsibility). */
void initialise_centers(byte_t *data, double *centers, int n_pixels, int n_channels, int n_clusters)
{
    for (int cluster = 0; cluster < n_clusters; cluster++) {
        /* pick a random pixel to act as this cluster's initial center */
        int pick = rand() % n_pixels;
        double *center = &centers[cluster * n_channels];
        const byte_t *pixel = &data[pick * n_channels];
        for (int channel = 0; channel < n_channels; channel++) {
            center[channel] = pixel[channel];
        }
    }
}
/*
 * Assign every pixel to its nearest cluster center (squared Euclidean
 * distance over the colour channels).
 *
 * Outputs:
 *   labels[p]    - index of the closest center for pixel p
 *   distances[p] - squared distance from pixel p to that center
 *   *changed     - 1 if any pixel switched cluster, 0 otherwise
 */
void assign_pixels(byte_t *data, double *centers, int *labels, double *distances, int *changed, int n_pixels, int n_channels, int n_clusters)
{
    int have_clusters_changed = 0;

    /* Fix: the original wrote the shared flag from many threads at once
     * (a data race) and declared an outer `int min_distance` that was
     * shadowed by the double below.  An OR-reduction makes the flag
     * update race-free, and all loop-locals are now declared inside the
     * parallel loop. */
    #pragma omp parallel for schedule(static) reduction(|:have_clusters_changed)
    for (int pixel = 0; pixel < n_pixels; pixel++) {
        double min_distance = DBL_MAX;
        int min_cluster = 0;

        /* find the center closest to this pixel */
        for (int cluster = 0; cluster < n_clusters; cluster++) {
            double distance = 0;
            for (int channel = 0; channel < n_channels; channel++) {
                /* squared Euclidean distance over the channels */
                double diff = (double)(data[pixel * n_channels + channel] - centers[cluster * n_channels + channel]);
                distance += diff * diff;
            }
            if (distance < min_distance) {
                min_distance = distance;
                min_cluster = cluster;
            }
        }

        distances[pixel] = min_distance;

        /* if pixel's cluster has changed, update it and raise the flag */
        if (labels[pixel] != min_cluster) {
            labels[pixel] = min_cluster;
            have_clusters_changed = 1;
        }
    }

    /* set the outside flag */
    *changed = have_clusters_changed;
}
/*
 * Recompute each cluster center as the mean of its member pixels.
 * Empty clusters are re-seeded with the pixel currently farthest from
 * its own center so no palette entry is wasted.
 */
void update_centers(byte_t *data, double *centers, int *labels, double *distances, int n_pixels, int n_channels, int n_clusters)
{
    int *counts = malloc(n_clusters * sizeof(int));

    /* reset centers and initialise the per-cluster pixel counters */
    for (int cluster = 0; cluster < n_clusters; cluster++) {
        for (int channel = 0; channel < n_channels; channel++) {
            centers[cluster * n_channels + channel] = 0;
        }
        counts[cluster] = 0;
    }

    int pixel, min_cluster, channel;
    /* accumulate per-cluster channel sums and member counts
     * (array-section reduction requires OpenMP >= 4.5) */
    #pragma omp parallel for private(pixel, min_cluster, channel) reduction(+:centers[:n_clusters * n_channels], counts[:n_clusters])
    for (pixel = 0; pixel < n_pixels; pixel++) {
        min_cluster = labels[pixel];
        /* sum without division; the mean is taken below */
        for (channel = 0; channel < n_channels; channel++) {
            centers[min_cluster * n_channels + channel] += data[pixel * n_channels + channel];
        }
        counts[min_cluster] += 1;
    }

    /* turn the sums into means */
    for (int cluster = 0; cluster < n_clusters; cluster++) {
        if (counts[cluster]) {
            for (int channel = 0; channel < n_channels; channel++) {
                centers[cluster * n_channels + channel] /= counts[cluster];
            }
        } else {
            /* empty cluster: re-seed it with the farthest pixel */
            double max_distance = 0;
            int farthest_pixel = 0;
            #pragma omp parallel
            {
                /* thread-local running maximum */
                double max_distance_local = max_distance;
                int farthest_pixel_local = farthest_pixel;
                /* nowait: no barrier needed before the critical section */
                #pragma omp for nowait
                for (int p = 0; p < n_pixels; p++) {
                    /* Fix: the original compared against the *shared*
                     * max_distance, so a thread could miss its own local
                     * maximum; compare against the thread-local value. */
                    if (distances[p] > max_distance_local) {
                        max_distance_local = distances[p];
                        farthest_pixel_local = p;
                    }
                }
                /* merge the thread-local maxima */
                #pragma omp critical
                {
                    if (max_distance_local > max_distance) {
                        max_distance = max_distance_local;
                        farthest_pixel = farthest_pixel_local;
                    }
                }
            }
            /* move the center onto the farthest pixel's channels */
            for (int channel = 0; channel < n_channels; channel++) {
                centers[cluster * n_channels + channel] = data[farthest_pixel * n_channels + channel];
            }
            /* zero its distance so the same pixel is not picked again
             * for another empty cluster in this pass */
            distances[farthest_pixel] = 0;
        }
    }

    free(counts);
}
/* Overwrite every pixel with the (rounded) channel values of the center
 * of the cluster it was assigned to. */
void update_data(byte_t *data, double *centers, int *labels, int n_pixels, int n_channels)
{
    int pixel, channel, min_cluster;
    #pragma omp parallel for schedule(static) private(pixel, channel, min_cluster)
    for (pixel = 0; pixel < n_pixels; pixel++) {
        min_cluster = labels[pixel];
        const double *center = &centers[min_cluster * n_channels];
        byte_t *out = &data[pixel * n_channels];
        for (channel = 0; channel < n_channels; channel++) {
            out[channel] = (byte_t)round(center[channel]);
        }
    }
}
sorter.h | #ifndef __MY_SORTER_H__
#define __MY_SORTER_H__
/*
* Copyright 2014 Open Connectome Project (http://openconnecto.me)
* Written by Da Zheng (zhengda1936@gmail.com)
*
* This file is part of FlashMatrix.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <math.h>

#include <memory>
#include <vector>

#if defined(_OPENMP)
#include <omp.h>
#include <parallel/algorithm>
#else
#include <algorithm>
#endif
namespace fm
{
// Type-erased sorting interface: implementations operate on raw byte
// buffers so callers can sort arrays without knowing the element type at
// compile time.  In every method, `num`/`out_num` count *elements*, not
// bytes.
class sorter
{
public:
// Return true when the `num` elements in `data` are already ordered in
// the requested direction.
virtual bool is_sorted(const char *data, size_t num, bool decreasing) const = 0;
// Sort `data` in place and record, in `offs`, the original index of the
// element now at each position.
virtual void sort_with_index(char *data, off_t *offs, size_t num,
bool decreasing) const = 0;
// Sort `data` in place (may run in parallel).
virtual void sort(char *data, size_t num, bool decreasing) const = 0;
// Always-sequential in-place sort.
virtual void serial_sort(char *data, size_t num, bool decreasing) const = 0;
// Merge sorted input ranges (begin/end pointer pairs) into `output`.
virtual void merge(
const std::vector<std::pair<const char *, const char *> > &arrs,
char *output, size_t out_num) const = 0;
// Replay a previously computed merge order: merge_index maps each
// output slot to an (array index, offset) pair.
virtual void merge(
const std::vector<std::pair<const char *, const char *> > &arrs,
const std::vector<std::pair<int, off_t> > &merge_index,
char *output, size_t out_num) const = 0;
// Merge and also emit the (array index, offset) merge order into
// `merge_index`.
virtual void merge_with_index(
const std::vector<std::pair<const char *, const char *> > &arrs,
char *output, size_t out_num,
std::vector<std::pair<int, off_t> > &merge_index) const = 0;
};
// Concrete sorter for elements of type T: every char* buffer is
// reinterpreted as an array of T.
template<class T>
class type_sorter: public sorter
{
// Ascending comparator (operator< on T).
struct {
bool operator()(const T &e1, const T &e2) const {
return e1 < e2;
}
} entry_less;
// Descending comparator (operator> on T).
struct {
bool operator()(const T &e1, const T &e2) const {
return e1 > e2;
}
} entry_greater;
public:
virtual bool is_sorted(const char *data, size_t num, bool decreasing) const;
virtual void sort_with_index(char *data, off_t *offs, size_t num,
bool decreasing) const;
virtual void sort(char *data, size_t num, bool decreasing) const;
virtual void serial_sort(char *data, size_t num, bool decreasing) const;
virtual void merge(
const std::vector<std::pair<const char *, const char *> > &arrs,
char *output, size_t out_num) const;
virtual void merge(
const std::vector<std::pair<const char *, const char *> > &arrs,
const std::vector<std::pair<int, off_t> > &merge_index,
char *output, size_t out_num) const;
virtual void merge_with_index(
const std::vector<std::pair<const char *, const char *> > &arrs,
char *output, size_t out_num,
std::vector<std::pair<int, off_t> > &merge_index) const;
};
/*
 * Check whether the `num` elements of type T stored in `data1` are
 * ordered in the direction selected by `decreasing`.
 */
template<class T>
bool type_sorter<T>::is_sorted(const char *data1, size_t num, bool decreasing) const
{
	const T *begin = (T *) data1;
	const T *end = begin + num;
	return decreasing ? std::is_sorted(begin, end, entry_greater)
		: std::is_sorted(begin, end, entry_less);
}
/*
 * Sort the `num` elements in `data1` while tracking provenance: after
 * the call, offs[i] holds the original position of the element now at
 * slot i.  Implemented by packing (value, index) pairs, sorting those,
 * then unpacking.  Uses the GNU parallel sort when OpenMP is enabled.
 */
template<class T>
void type_sorter<T>::sort_with_index(char *data1, off_t *offs, size_t num,
		bool decreasing) const
{
	T *vals = (T *) data1;
	struct indexed_entry {
		T val;
		off_t idx;
	};
	std::unique_ptr<indexed_entry[]> pairs(new indexed_entry[num]);
	// Pack each value together with its original position.
#pragma omp parallel for
	for (size_t i = 0; i < num; i++) {
		pairs[i].val = vals[i];
		pairs[i].idx = i;
	}
	// Comparators ordering pairs by value only.
	struct {
		bool operator()(const indexed_entry &e1, const indexed_entry &e2) const {
			return e1.val < e2.val;
		}
	} entry_less;
	struct {
		bool operator()(const indexed_entry &e1, const indexed_entry &e2) const {
			return e1.val > e2.val;
		}
	} entry_greater;
	indexed_entry *begin = pairs.get();
	indexed_entry *end = begin + num;
#if defined(_OPENMP)
	if (decreasing)
		__gnu_parallel::sort(begin, end, entry_greater);
	else
		__gnu_parallel::sort(begin, end, entry_less);
#else
	if (decreasing)
		std::sort(begin, end, entry_greater);
	else
		std::sort(begin, end, entry_less);
#endif
	// Unpack: values back into the caller's buffer, origins into offs.
#pragma omp parallel for
	for (size_t i = 0; i < num; i++) {
		vals[i] = begin[i].val;
		offs[i] = begin[i].idx;
	}
}
/*
 * In-place sort of `num` elements of type T; parallel under OpenMP,
 * falling back to std::sort otherwise.
 */
template<class T>
void type_sorter<T>::sort(char *data1, size_t num, bool decreasing) const
{
	T *begin = (T *) data1;
	T *end = begin + num;
#if defined(_OPENMP)
	if (decreasing)
		__gnu_parallel::sort(begin, end, entry_greater);
	else
		__gnu_parallel::sort(begin, end, entry_less);
#else
	if (decreasing)
		std::sort(begin, end, entry_greater);
	else
		std::sort(begin, end, entry_less);
#endif
}
/*
 * Always-sequential in-place sort, regardless of OpenMP availability.
 */
template<class T>
void type_sorter<T>::serial_sort(char *data1, size_t num, bool decreasing) const
{
	T *begin = (T *) data1;
	T *end = begin + num;
	if (decreasing)
		std::sort(begin, end, entry_greater);
	else
		std::sort(begin, end, entry_less);
}
/*
 * k-way merge of sorted input ranges into `output` (ascending order).
 * Only implemented for OpenMP builds (relies on __gnu_parallel).
 */
template<class T>
void type_sorter<T>::merge(
		const std::vector<std::pair<const char *, const char *> > &raw_arrs,
		char *output, size_t out_num) const
{
	std::vector<std::pair<T *, T *> > typed(raw_arrs.size());
	for (size_t i = 0; i < typed.size(); i++)
		typed[i] = std::pair<T *, T *>((T *) raw_arrs[i].first,
				(T *) raw_arrs[i].second);
#if defined(_OPENMP)
	__gnu_parallel::multiway_merge(typed.begin(), typed.end(), (T *) output,
			out_num, entry_less);
#else
	// No serial fallback has been implemented.
	assert(0);
#endif
}
/*
 * Replay a precomputed merge: for each output slot i, merge_index[i]
 * names the source array and the element offset within it.
 * Assumes only a small number of arrays are being merged.
 */
template<class T>
void type_sorter<T>::merge(
		const std::vector<std::pair<const char *, const char *> > &arrs,
		const std::vector<std::pair<int, off_t> > &merge_index,
		char *output, size_t out_num) const
{
	T *t_output = (T *) output;
#pragma omp parallel for
	for (size_t i = 0; i < out_num; i++) {
		const int src_arr = merge_index[i].first;
		const off_t src_off = merge_index[i].second;
		const T *src = (const T *) arrs[src_arr].first;
		assert(&src[src_off] <= (const T *) arrs[src_arr].second);
		t_output[i] = src[src_off];
	}
}
/*
 * Number of T elements in a [begin, end) byte range (`first` is the
 * beginning of the array, `second` one past its end).
 */
template<class T>
size_t get_length(const std::pair<const char *, const char *> &arr)
{
	size_t nbytes = arr.second - arr.first;
	return nbytes / sizeof(T);
}
/*
 * Merge multiple sorted arrays into `output` and record, in `merge_index`,
 * the (array index, offset) provenance of every output element so the
 * same merge order can later be replayed on parallel data.
 * Here I assume there are a few number of arrays to merge.
 * NOTE(review): merge_index is written with operator[], so the caller
 * appears to be required to pass it already sized to out_num -- confirm
 * at the call sites.
 */
template<class T>
void type_sorter<T>::merge_with_index(
const std::vector<std::pair<const char *, const char *> > &arrs,
char *output, size_t out_num,
std::vector<std::pair<int, off_t> > &merge_index) const
{
// An element tagged with its source array and its offset in that array.
struct indexed_entry {
T val;
int arr_idx;
off_t off_in_arr;
};
std::unique_ptr<indexed_entry[]> buf(new indexed_entry[out_num]);
// Move data from `arrs' to `buf' in parallel: the inputs are laid out
// back-to-back in `buf', and each thread fills one contiguous slice of
// length ~out_num / num_threads.
#pragma omp parallel
{
#if defined(_OPENMP)
size_t avg_part_len = ceil(((double) out_num) / omp_get_num_threads());
size_t thread_id = omp_get_thread_num();
#else
size_t avg_part_len = out_num;
size_t thread_id = 0;
#endif
// This thread owns the slice [start, start + part_len) of `buf'.
size_t start = thread_id * avg_part_len;
if (out_num > start) {
size_t part_len = std::min(out_num - start, avg_part_len);
// Find the first array for the current thread by walking cumulative
// array lengths until they pass `start'.
size_t curr_arr_idx = 0;
size_t i = 0;
while (true) {
// If the array is empty, it works fine.
size_t num_eles = get_length<T>(arrs[curr_arr_idx]);
if (i + num_eles > start)
break;
i += num_eles;
curr_arr_idx++;
}
assert(start >= i);
off_t curr_off_in_arr = start - i;
assert(get_length<T>(arrs[curr_arr_idx]) > 0);
assert(arrs[curr_arr_idx].first <= arrs[curr_arr_idx].second);
// Copy part_len elements, tagging each with its provenance and
// advancing to the next non-empty array when one is exhausted.
for (size_t i = 0; i < part_len; i++) {
const T *curr_ptr
= ((const T *) arrs[curr_arr_idx].first) + curr_off_in_arr;
assert(curr_ptr < (const T *) arrs[curr_arr_idx].second);
buf[start + i].val = *curr_ptr;
buf[start + i].arr_idx = curr_arr_idx;
buf[start + i].off_in_arr = curr_off_in_arr;
// If the current pointer points to the last element in the array,
// switch to the next array.
if (curr_ptr == ((const T *) arrs[curr_arr_idx].second) - 1) {
curr_arr_idx++;
// We need to skip the empty arrays.
while (curr_arr_idx < arrs.size()
&& get_length<T>(arrs[curr_arr_idx]) == 0)
curr_arr_idx++;
if (curr_arr_idx < arrs.size())
assert(arrs[curr_arr_idx].first <= arrs[curr_arr_idx].second);
curr_off_in_arr = 0;
if (i + 1 < part_len)
assert(curr_arr_idx < arrs.size());
}
else
curr_off_in_arr++;
}
}
}
// Rebuild begin/end pairs over `buf' mirroring the original arrays.
std::vector<std::pair<indexed_entry *, indexed_entry *> > indexed_arrs(
arrs.size());
size_t off = 0;
for (size_t i = 0; i < arrs.size(); i++) {
size_t len = get_length<T>(arrs[i]);
indexed_arrs[i] = std::pair<indexed_entry *, indexed_entry *>(
&buf[off], &buf[off + len]);
off += len;
}
assert(off == out_num);
#if defined(_OPENMP)
// Order the tagged entries by value only.
struct {
bool operator()(const indexed_entry &e1, const indexed_entry &e2) const {
return e1.val < e2.val;
}
} entry_less;
std::unique_ptr<indexed_entry[]> merge_res(new indexed_entry[out_num]);
__gnu_parallel::multiway_merge(indexed_arrs.begin(), indexed_arrs.end(),
merge_res.get(), out_num, entry_less);
// Split the merged (val, arr, off) triples into `output' and `merge_index'.
T *t_output = (T *) output;
for (size_t i = 0; i < out_num; i++) {
t_output[i] = merge_res[i].val;
merge_index[i].first = merge_res[i].arr_idx;
merge_index[i].second = merge_res[i].off_in_arr;
}
#else
// The serial path is not implemented.
assert(0);
#endif
}
}
#endif
|
multi_erm.h | #ifndef MULTI_ERM_H
#define MULTI_ERM_H
#include "../solvers/ista.h"
#include "../solvers/accelerator.h"
#include "../regul/regularizers.h"
#include "../regul/compute_regularization.h"
#include "../regul/mixed_l1_norm/mixed_l1_norm.h"
// Multiclass empirical-risk-minimisation driver.  Depending on the loss
// and regularizer it either solves one joint matrix-valued problem or
// runs an independent binary problem per class (one-vs-all) in parallel.
// W0 is the initial point, W receives the solution, and dual_variable
// (when non-empty) is read and updated by the dual-based solvers.
template <typename InputMatrixType, typename LossType>
class MULTI_ERM : public ERM<InputMatrixType> {
public:
MULTI_ERM(const Matrix<typename InputMatrixType::value_type>& w0, Matrix<typename InputMatrixType::value_type>& w, Matrix<typename InputMatrixType::value_type>& dual_variable, OptimInfo<typename InputMatrixType::value_type>& optim_info, const ParamSolver<typename InputMatrixType::value_type>& param, const ParamModel<typename InputMatrixType::value_type>& model) :ERM<InputMatrixType>(optim_info, param, model), W0(w0), W(w), dual_variable(dual_variable) {
}
// X is p x n
// y is nclasses x n
// W0 is p x nclasses if no intercept (or p+1 x nclasses with intercept)
// prediction model is W0^FeatureType X gives nclasses x n
// Solve a multiclass problem given integer class labels in y.
void solve_problem_vector(const InputMatrixType& X, const Vector<int>& y) {
verify_input(X);
const int nclass = y.maxval() + 1;
// Losses that are not natively multiclass are handled by converting
// the labels to a +/-1 one-hot matrix and delegating to the matrix
// formulation.
if ((super::is_regression_loss(super::model.loss) || !super::is_loss_for_matrices(super::model.loss)))
{
const int n = y.n();
Matrix<typename InputMatrixType::value_type> labels(nclass, n);
labels.set(-(1.0));
for (int ii = 0; ii < n; ++ii)
labels(y[ii], ii) = (1.0);
MULTI_ERM<InputMatrixType, LinearLossMat<InputMatrixType, Matrix<typename InputMatrixType::value_type>>> problem_configuration(W0, W, dual_variable, super::optim_info, super::param, super::model);
// both functions return void; the return simply ends this overload
return problem_configuration.solve_problem_matrix(X, labels);
}
init_omp(super::param.threads);
typedef Matrix<FeatureType> D;
DataMatrixLinear<InputMatrixType> data(X, super::model.intercept);
if (super::param.verbose)
data.print();
// Multinomial logistic is the only true multiclass loss implemented;
// any other request falls back to it with a warning.
LinearLossMat<InputMatrixType, Vector<int>>* loss = new MultiClassLogisticLoss<InputMatrixType>(data, y);
if (super::model.loss != MULTI_LOGISTIC) {
logging(logERROR) << "Multilog loss is the only multi class implemented loss!";
logging(logINFO) << "Multilog loss is used!";
}
const bool transpose = loss->transpose();
Regularizer<D, PointerType>* regul = get_regul_mat(nclass, transpose);
solve_mat(*loss, *regul);
delete (regul);
delete (loss);
};
// X is p x n
// y is nclasses x n
// W0 is p x nclasses if no intercept (or p+1 x nclasses with intercept)
// prediction model is W0^FeatureType X gives nclasses x n
// Solve with matrix-valued labels: a single joint problem when the loss
// or regularizer couples the classes, otherwise one-vs-all over rows of y.
void solve_problem_matrix(const InputMatrixType& X, const Matrix<typename InputMatrixType::value_type>& y) {
verify_input(X);
init_omp(super::param.threads);
typedef Matrix<FeatureType> D;
if (super::is_loss_for_matrices(super::model.loss) || super::is_regul_for_matrices(super::model.regul))
{
DataMatrixLinear<InputMatrixType> data(X, super::model.intercept);
if (super::param.verbose)
data.print();
LinearLossMat<InputMatrixType, Matrix<FeatureType>>* loss = get_loss_matrix(data, y);
const int nclass = W0.n();
Regularizer<D, PointerType>* regul = get_regul_mat(nclass, loss->transpose());
solve_mat(*loss, *regul);
delete (regul);
delete (loss);
}
else
{
// One-vs-all: solve an independent vector problem per class, in
// parallel over classes.
W.copy(W0);
const int nclass = W0.n();
const int duality_gap_interval = MAX(super::param.duality_gap_interval, 1);
super::optim_info.resize(nclass, NUMBER_OPTIM_PROCESS_INFO, MAX(super::param.max_iter / duality_gap_interval, 1));
super::optim_info.setZeros();
// silence the per-class solvers; progress is logged below instead
ParamSolver<FeatureType> param2 = super::param;
param2.verbose = false;
if (super::param.verbose)
{
DataMatrixLinear<InputMatrixType> data(X, super::model.intercept);
data.print();
}
Timer global_all;
global_all.start();
#pragma omp parallel for
for (int ii = 0; ii < nclass; ++ii)
{
Vector<FeatureType> w0col, wcol, ycol, dualcol;
OptimInfo<FeatureType> optim_info_col;
W0.refCol(ii, w0col);
W.refCol(ii, wcol);
y.copyRow(ii, ycol);
if (dual_variable.m() == nclass)
{
dual_variable.copyRow(ii, dualcol);
}
SIMPLE_ERM<InputMatrixType, LinearLossVec<InputMatrixType>> problem_configuration = SIMPLE_ERM<InputMatrixType, LinearLossVec<InputMatrixType>>(w0col, wcol, dualcol, optim_info_col, param2, super::model);
problem_configuration.solve_problem(X, ycol);
if (dual_variable.m() == nclass)
dual_variable.copyToRow(ii, dualcol);
// serialise merging per-class results and logging
#pragma omp critical
{
super::optim_info.add(optim_info_col, ii);
if (super::param.verbose)
{
const int noptim = optim_info_col.n() - 1;
logging(logINFO) << "Solver " << ii << " has terminated after " << optim_info_col(0, 0, noptim) << " epochs in " << optim_info_col(0, 5, noptim) << " seconds";
if (optim_info_col(0, 4, noptim) == 0)
{
logging(logINFO) << " Primal objective: " << optim_info_col(0, 1, noptim) << ", relative duality gap: " << optim_info_col(0, 3, noptim);
}
else
{
logging(logINFO) << " Primal objective: " << optim_info_col(0, 1, noptim) << ", tol: " << optim_info_col(0, 4, noptim);
}
}
}
}
global_all.stop();
if (super::param.verbose)
{
logging(logINFO) << "Time for the one-vs-all strategy";
global_all.printElapsed();
}
}
}
private:
typedef ERM<InputMatrixType> super;
typedef typename InputMatrixType::value_type FeatureType;
typedef typename InputMatrixType::index_type PointerType;
// References owned by the caller; W0 is never modified.
const Matrix<FeatureType>& W0;
Matrix<FeatureType>& W;
Matrix<FeatureType>& dual_variable;
// Sanity-check dimensions and hyper-parameters before solving.
inline void verify_input(const InputMatrixType& X) {
if ((super::model.intercept && X.m() + 1 != W0.m()) || (!super::model.intercept && X.m() != W0.m())){
logging(logERROR) << "Dimension of initial point is not consistent.";
return;
}
if (super::param.max_iter < 0)
{
throw ValueError("Maximum number of iteration must be positive");
}
if (super::model.lambda_1 < 0)
{
throw ValueError("Penalty term must be positive");
}
if (super::param.tol < 0)
{
throw ValueError("Tolerance for stopping criteria must be positive");
}
};
// Instantiate the matrix-valued loss matching model.loss (falls back to
// square loss with an error log for unsupported values).
LinearLossMat<InputMatrixType, Matrix<typename InputMatrixType::value_type>>* get_loss_matrix(DataMatrixLinear<InputMatrixType>& data, const Matrix<typename InputMatrixType::value_type>& y) {
typedef typename InputMatrixType::value_type FeatureType;
LinearLossMat<InputMatrixType, Matrix<FeatureType>>* loss;
switch (super::model.loss)
{
case SQUARE:
loss = new SquareLossMat<InputMatrixType>(data, y);
break;
case LOGISTIC:
loss = new LossMat<LogisticLoss<InputMatrixType>>(data, y);
break;
case SQHINGE:
loss = new LossMat<SquaredHingeLoss<InputMatrixType>>(data, y);
break;
case SAFE_LOGISTIC:
loss = new LossMat<SafeLogisticLoss<InputMatrixType>>(data, y);
break;
default:
logging(logERROR) << "Not implemented, square loss is chosen by default";
loss = new SquareLossMat<InputMatrixType>(data, y);
}
return loss;
}
// Run (or, when max_iter == 0, only evaluate) the chosen solver on the
// matrix problem, transposing W when the loss prefers that layout.
inline void solve_mat(LossType& loss, const Regularizer<typename LossType::variable_type, typename LossType::index_type>& regul)
{
typedef typename LossType::value_type value_type;
typedef typename LossType::variable_type variable_type;
Solver<LossType>* solver;
if (super::param.max_iter == 0)
{
// max_iter == 0: evaluate the objective at W0 without optimising.
ParamSolver<value_type> param2 = super::param;
param2.verbose = false;
solver = new ISTA_Solver<LossType>(loss, regul, param2);
if (loss.transpose())
{
Matrix<value_type> W0T, WT;
W0.transpose(W0T);
solver->eval(W0T);
}
else
{
solver->eval(W0);
}
W.copy(W0);
}
else
{
solver = get_solver(loss, regul, super::param);
if (!solver)
{
W.copy(W0);
return;
}
variable_type new_W0;
if (loss.intercept())
{
loss.set_intercept(W0, new_W0);
}
else
{
new_W0.copyRef(W0);
}
if (dual_variable.n() != 0)
solver->set_dual_variable(dual_variable);
if (loss.transpose())
{
Matrix<value_type> W0T, WT;
new_W0.transpose(W0T);
solver->solve(W0T, WT);
WT.transpose(W);
}
else
{
solver->solve(new_W0, W);
}
if (loss.intercept())
{
loss.reverse_intercept(W);
}
}
// snap near-zero L1 coefficients to exactly zero
if (regul.id() == L1)
for (INTM ii = 0; ii < W.n(); ++ii)
for (INTM jj = 0; jj < W.m(); ++jj)
if (abs<value_type>(W(jj, ii)) < EPSILON)
W(jj, ii) = 0;
solver->get_optim_info(super::optim_info);
delete (solver);
};
// Choose and construct a solver; AUTO picks heuristically from the
// problem size and the regularizer's strong convexity.
Solver<LossType>* get_solver(const LossType& loss, const Regularizer<typename LossType::variable_type, typename LossType::index_type>& regul, const ParamSolver<typename LossType::value_type>& param)
{
Solver<LossType>* solver;
solver_t solver_type = param.solver;
if (solver_type == AUTO)
{
const FeatureType L = loss.lipschitz();
const int n = loss.n();
const FeatureType lambda_1 = regul.strong_convexity();
if (n < 1000)
{
solver_type = QNING_ISTA;
}
else if (lambda_1 < L / (100 * n))
{
solver_type = QNING_MISO;
}
else
{
solver_type = CATALYST_MISO;
}
}
switch (solver_type)
{
case ISTA:
solver = new ISTA_Solver<LossType>(loss, regul, param);
break;
case QNING_ISTA:
solver = new QNing<ISTA_Solver<LossType>>(loss, regul, param);
break;
case CATALYST_ISTA:
solver = new Catalyst<ISTA_Solver<LossType>>(loss, regul, param);
break;
case FISTA:
solver = new FISTA_Solver<LossType>(loss, regul, param);
break;
case SVRG:
solver = new SVRG_Solver<LossType>(loss, regul, param);
break;
case MISO:
// plain MISO requires strong convexity; otherwise wrap in Catalyst
solver = regul.strong_convexity() > 0 ? new MISO_Solver<LossType>(loss, regul, param) : new Catalyst<MISO_Solver<LossType>>(loss, regul, param);
break;
case SVRG_UNIFORM:
{
ParamSolver<typename LossType::value_type> param2 = param;
param2.non_uniform_sampling = false;
solver = new SVRG_Solver<LossType>(loss, regul, param2);
break;
}
case CATALYST_SVRG:
solver = new Catalyst<SVRG_Solver<LossType>>(loss, regul, param);
break;
case QNING_SVRG:
solver = new QNing<SVRG_Solver<LossType>>(loss, regul, param);
break;
case CATALYST_MISO:
solver = new Catalyst<MISO_Solver<LossType>>(loss, regul, param);
break;
case QNING_MISO:
solver = new QNing<MISO_Solver<LossType>>(loss, regul, param);
break;
case ACC_SVRG:
solver = new Acc_SVRG_Solver<LossType>(loss, regul, param);
break;
default:
// NOTE(review): the assignment below is unreachable after the throw
throw NotImplementedException("This solver is not implemented!");
solver = NULL;
}
return solver;
};
// Build the matrix regularizer matching model.regul; `transpose` follows
// the loss's preferred memory layout.
Regularizer<Matrix<typename InputMatrixType::value_type>, typename InputMatrixType::index_type>* get_regul_mat(const int nclass, const bool transpose)
{
typedef Matrix<FeatureType> D;
typedef Vector<FeatureType> V;
Regularizer<D, PointerType>* regul;
switch (super::model.regul)
{
case L2:
regul = transpose ? static_cast<Regularizer<D, PointerType> *>(new RegVecToMat<Ridge<V, PointerType>>(super::model))
: new RegMat<Ridge<V, PointerType>>(super::model, nclass, transpose);
break;
case L1:
regul = transpose ? static_cast<Regularizer<D, PointerType> *>(new RegVecToMat<Lasso<V, PointerType>>(super::model))
: new RegMat<Lasso<V, PointerType>>(super::model, nclass, transpose);
break;
case ELASTICNET:
regul = transpose ? static_cast<Regularizer<D, PointerType> *>(new RegVecToMat<ElasticNet<V, PointerType>>(super::model))
: new RegMat<ElasticNet<V, PointerType>>(super::model, nclass, transpose);
break;
case L1BALL:
regul = new RegMat<L1Ball<V, PointerType>>(super::model, nclass, transpose);
break;
case L2BALL:
regul = new RegMat<L2Ball<V, PointerType>>(super::model, nclass, transpose);
break;
case L1L2:
regul = new MixedL1L2<FeatureType, PointerType>(super::model, nclass, transpose);
break;
case L1L2_L1:
regul = new MixedL1L2_L1<FeatureType, PointerType>(super::model, nclass, transpose);
break;
case L1LINF:
regul = new MixedL1Linf<FeatureType, PointerType>(super::model, nclass, transpose);
break;
case FUSEDLASSO:
regul = new RegMat<FusedLasso<V, PointerType>>(super::model, nclass, transpose);
break;
case NONE:
regul = new None<D, PointerType>(super::model);
break;
default:
logging(logERROR) << "Not implemented, no regularization is chosen";
regul = new None<D, PointerType>(super::model);
}
return regul;
};
};
#endif |
viterbi_decode_op.h | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/operators/controlflow/compare_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_functor.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/gather.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/operators/unique_op.h"
#ifdef PADDLE_WITH_MKLML
#include <omp.h>
#endif
namespace paddle {
namespace operators {
using LoDTensor = framework::LoDTensor;
// Argmax reduction along `axis`: the input is viewed as [pre, n, post]
// with n = dims[axis]; for each of the pre*post slices the maximum value
// is written to `out` and its position along `axis` to `out_idx`.
template <typename DeviceContext, typename T, typename IndType>
struct Argmax {
void operator()(const framework::ExecutionContext& ctx, const Tensor& input,
Tensor* out_idx, Tensor* out, int axis) {
framework::DDim input_dims = input.dims();
int64_t pre = 1;
int64_t post = 1;
int64_t n = input_dims[axis];
// collapse the dimensions before and after `axis`
for (int i = 0; i < axis; i++) {
pre *= input_dims[i];
}
for (int i = axis + 1; i < input_dims.size(); i++) {
post *= input_dims[i];
}
int64_t height = pre * post;
int64_t width = n;
const T* in_data = input.data<T>();
IndType* out_idx_data = out_idx->data<IndType>();
T* out_data = out->data<T>();
// Reduce: each iteration handles one (pre, post) slice independently.
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int64_t i = 0; i < height; ++i) {
int64_t h = i / post;
int64_t w = i % post;
IndType max_idx = -1;
T max_value = (std::numeric_limits<T>::lowest)(); // for windows compile
// scan the `width` elements of this slice along `axis`
for (int64_t j = 0; j < width; ++j) {
if (in_data[h * width * post + j * post + w] > max_value) {
max_value = in_data[h * width * post + j * post + w];
max_idx = j;
}
}
out_data[i] = max_value;
out_idx_data[i] = max_idx;
}
}
};
// Fill `data` with the scaled sequence 0, scale, 2*scale, ...,
// (end-1)*scale.  CPU implementation; `dev_ctx` is not used here.
template <typename DeviceContext>
struct ARange {
  void operator()(const DeviceContext& dev_ctx, int64_t* data, int end,
                  int64_t scale) {
    for (int idx = 0; idx != end; ++idx) {
      data[idx] = scale * idx;
    }
  }
};
// Scan every element of `input` and store the largest one in *max_value.
template <typename DeviceContext, typename T>
struct GetMaxValue {
  void operator()(const DeviceContext& dev_ctx, const Tensor& input,
                  T* max_value) {
    const T* begin = input.data<T>();
    const T* end = begin + input.numel();
    *max_value = *std::max_element(begin, end);
  }
};
// Thin adapter forwarding to the CPU gather kernel.
// NOTE(review): semantics follow CPUGather<T, IndexT> (selecting rows of
// `src` by `index`) -- confirm against paddle/fluid/operators/gather.h.
template <typename DeviceContext, typename T, typename IndexT = int>
struct Gather {
void operator()(const DeviceContext& ctx, const Tensor& src,
const Tensor& index, Tensor* output) {
CPUGather<T, IndexT>(ctx, src, index, output);
}
};
// Elementwise binary op for two tensors of identical shape:
// out[i] = Functor(lhs[i], rhs[i]).  `out` must already be allocated.
template <typename T, typename Functor, typename OutT = T>
void SameDimsBinaryOP(const Tensor& lhs, const Tensor& rhs, Tensor* out) {
  const T* a = lhs.data<T>();
  const T* b = rhs.data<T>();
  OutT* dst = out->data<OutT>();
  const auto n = out->numel();
  Functor op;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < n; ++i) {
    dst[i] = op(a[i], b[i]);
  }
}
// Elementwise comparison of two same-shaped tensors; the result of
// CompareFunctor<int64_t, T> is stored as T in `mask`.
// NOTE(review): both inputs are always read as int64_t regardless of T --
// presumably they are label/index tensors; confirm at call sites.
template <typename DeviceContext,
template <typename InT, typename OutT> typename CompareFunctor,
typename T>
struct GetMask {
void operator()(const framework::ExecutionContext& ctx, const Tensor& lhs,
const Tensor& rhs, Tensor* mask) {
SameDimsBinaryOP<int64_t, CompareFunctor<int64_t, T>, T>(lhs, rhs, mask);
}
};
// Map a flattened output index to the corresponding flattened input
// indices for a broadcasted binary op.  A dimension of size 1 in an
// input broadcasts and contributes nothing to that input's index.  This
// stride-arithmetic version is stateless, so it is safe to call from
// multiple threads; `index_array` is unused here.
template <bool is_multi_threads>
struct GetInputIndex {
  void operator()(const std::vector<int>& lhs_dims,
                  const std::vector<int>& rhs_dims,
                  const std::vector<int>& output_dims,
                  const std::vector<int>& lhs_strides,
                  const std::vector<int>& rhs_strides,
                  const std::vector<int>& output_strides, int output_idx,
                  int* index_array, int* lhs_idx, int* rhs_idx) {
    const int ndims = static_cast<int>(output_strides.size());
    int remaining = output_idx;
    for (int dim = 0; dim < ndims; ++dim) {
      const int coord = remaining / output_strides[dim];
      remaining %= output_strides[dim];
      if (lhs_dims[dim] > 1) {
        *lhs_idx += coord * lhs_strides[dim];
      }
      if (rhs_dims[dim] > 1) {
        *rhs_idx += coord * rhs_strides[dim];
      }
    }
  }
};
// Sequential variant: instead of divisions per call, it keeps the running
// multi-dimensional coordinate in `index_array` and advances it by one
// element per call.  Because of that shared state it must be invoked with
// consecutive output indices from a single thread (the `false` =
// single-threaded specialization).
template <>
struct GetInputIndex<false> {
void operator()(const std::vector<int>& lhs_dims,
const std::vector<int>& rhs_dims,
const std::vector<int>& output_dims,
const std::vector<int>& lhs_strides,
const std::vector<int>& rhs_strides,
const std::vector<int>& output_strides, int output_idx,
int* index_array, int* lhs_idx, int* rhs_idx) {
int out_dims_size = output_strides.size();
// map the current coordinate to each input's flattened index
*lhs_idx =
pten::GetElementwiseIndex(lhs_dims.data(), out_dims_size, index_array);
*rhs_idx =
pten::GetElementwiseIndex(rhs_dims.data(), out_dims_size, index_array);
// step `index_array` to the next output coordinate
pten::UpdateElementwiseIndexArray(output_dims.data(), out_dims_size,
index_array);
}
};
// Broadcasting elementwise binary op: the inputs may have size-1
// dimensions that broadcast against `out`'s shape; all three tensors
// must have the same rank.  `is_multi_threads` selects the stateless
// (parallel-safe) index computation; the stateful variant is only
// correct when iterations run in order on a single thread.
template <typename T, typename Functor, bool is_multi_threads = false>
void SimpleBroadcastBinaryOP(const Tensor& lhs, const Tensor& rhs,
Tensor* out) {
const T* lhs_ptr = lhs.data<T>();
const T* rhs_ptr = rhs.data<T>();
T* out_ptr = out->data<T>();
int out_size = static_cast<int>(out->dims().size());
std::vector<int> out_dims(out_size);
std::vector<int> lhs_dims(out_size);
std::vector<int> rhs_dims(out_size);
std::copy(lhs.dims().Get(), lhs.dims().Get() + out_size, lhs_dims.data());
std::copy(rhs.dims().Get(), rhs.dims().Get() + out_size, rhs_dims.data());
std::copy(out->dims().Get(), out->dims().Get() + out_size, out_dims.data());
std::vector<int> output_strides(out_size, 1);
std::vector<int> lhs_strides(out_size, 1);
std::vector<int> rhs_strides(out_size, 1);
std::vector<int> index_array(out_size, 0);
// calculate strides (row-major: the innermost dimension has stride 1)
for (int i = out_size - 2; i >= 0; --i) {
output_strides[i] = output_strides[i + 1] * out_dims[i + 1];
lhs_strides[i] = lhs_strides[i + 1] * lhs_dims[i + 1];
rhs_strides[i] = rhs_strides[i + 1] * rhs_dims[i + 1];
}
Functor functor;
GetInputIndex<is_multi_threads> get_input_index;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < out->numel(); ++i) {
int lhs_idx = 0;
int rhs_idx = 0;
// map output index i to the (possibly broadcast) input indices
get_input_index(lhs_dims, rhs_dims, out_dims, lhs_strides, rhs_strides,
output_strides, i, index_array.data(), &lhs_idx, &rhs_idx);
out_ptr[i] = functor(lhs_ptr[lhs_idx], rhs_ptr[rhs_idx]);
}
}
// Dispatcher for an element-wise binary op: takes the same-dims fast path
// when the shapes match, otherwise picks the broadcast implementation whose
// indexing strategy matches the available OpenMP parallelism.
template <typename DeviceContext, template <typename T> typename BinaryFunctor,
          typename T>
struct BinaryOperation {
  void operator()(const DeviceContext& dev_ctx, const Tensor& lhs,
                  const Tensor& rhs, Tensor* output) {
    if (lhs.dims() == rhs.dims()) {
      // Identical shapes: no index translation needed.
      SameDimsBinaryOP<T, BinaryFunctor<T>>(lhs, rhs, output);
      return;
    }
    bool use_parallel = false;
#ifdef PADDLE_WITH_MKLML
    use_parallel = (omp_get_max_threads() > 1);
#endif
    if (use_parallel) {
      // Thread-safe (stateless) broadcast indexing.
      SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, true>(lhs, rhs, output);
    } else {
      // Sequential broadcast indexing (keeps a running multi-index).
      SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, false>(lhs, rhs, output);
    }
  }
};
// Carves fixed-size blocks out of one pre-allocated 1-D LoDTensor so the
// kernel avoids many small allocations.  Blocks are handed out sequentially
// and never returned; the caller must size the backing tensor up front.
class TensorBuffer {
 public:
  explicit TensorBuffer(const LoDTensor& in) : buffer_(in), offset_(0) {
    buffer_.Resize({buffer_.numel()});  // flatten backing storage to 1-D
  }
  // Returns a view over the next prod(shape) elements, reshaped to `shape`.
  Tensor GetBufferBlock(std::initializer_list<int64_t> shape) {
    int64_t size = std::accumulate(shape.begin(), shape.end(), 1,
                                   std::multiplies<int64_t>());
    Tensor block = buffer_.Slice(offset_, offset_ + size);
    offset_ += size;
    block.Resize(shape);
    return block;
  }

 private:
  LoDTensor buffer_;  // need to resize 1-D Tensor
  // Elements handed out so far.  int64_t (was int): offsets are element
  // counts that can exceed INT_MAX for large batch/seq/label sizes, and the
  // arithmetic against the int64_t `size` above must not truncate.
  int64_t offset_;
};
// Viterbi decoding over a batch of emission scores.
//   "Input"      [batch_size, seq_len, n_labels] emission scores
//   "Transition" transition score matrix (reshaped to [1, n_labels, n_labels])
//   "Length"     per-batch actual sequence lengths
// Outputs:
//   "Scores"  best-path score per batch item
//   "Path"    [batch_size, max_seq_len] decoded label sequence
// The forward pass accumulates `alpha` scores and records per-step argmax
// tensors; the second phase backtracks through that history to emit the path.
template <typename DeviceContext, typename T>
class ViterbiDecodeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    bool include_bos_eos_tag = ctx.Attr<bool>("include_bos_eos_tag");
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto curr_place = ctx.GetPlace();
    auto* input = ctx.Input<Tensor>("Input");
    auto batch_size = static_cast<int>(input->dims()[0]);
    auto seq_len = static_cast<int>(input->dims()[1]);
    auto n_labels = static_cast<int>(input->dims()[2]);
    math::SetConstant<DeviceContext, T> float_functor;
    math::SetConstant<DeviceContext, int64_t> int_functor;
    // Per-step argmax tensors used later for backtracking.
    std::vector<Tensor> historys;
    // We create tensor buffer in order to avoid allocating memory frequently
    // 10 means allocate 10*batch_size bytes memory, such as int_mask, zero...
    int buffer_size = batch_size * (n_labels + 1) * seq_len + 10 * batch_size;
    LoDTensor int_buffer;
    int_buffer.Resize(framework::make_ddim({buffer_size}));
    int_buffer.mutable_data<int64_t>(ctx.GetPlace());
    TensorBuffer int_tensor_buffer(int_buffer);
    // create float tensor buffer
    // 10 means allocate 10*batch_size*n_labels bytes, such as alpha, alpha_max
    buffer_size = batch_size * (seq_len + 10) * n_labels +
                  (batch_size + 2) * n_labels * n_labels;
    LoDTensor float_buffer;
    float_buffer.Resize(framework::make_ddim({buffer_size}));
    float_buffer.mutable_data<T>(ctx.GetPlace());
    TensorBuffer float_tensor_buffer(float_buffer);
    auto* length = ctx.Input<Tensor>("Length");
    // left_length counts remaining steps per batch item; it is decremented
    // once per time step so masks can freeze finished sequences.
    Tensor left_length = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    framework::TensorCopy(*length, curr_place, dev_ctx, &left_length);
    int64_t max_seq_len = 0;
    GetMaxValue<DeviceContext, int64_t> get_max_value;
    get_max_value(dev_ctx, left_length, &max_seq_len);
    auto* scores = ctx.Output<Tensor>("Scores");
    scores->mutable_data<T>(curr_place);
    auto* path = ctx.Output<Tensor>("Path");
    path->Resize({batch_size, max_seq_len});
    path->mutable_data<int64_t>(curr_place);
    // tpath is laid out [time, batch] so each time step is one contiguous
    // slice; it is transposed back into `path` at the very end.
    Tensor tpath = int_tensor_buffer.GetBufferBlock({max_seq_len, batch_size});
    auto batch_path = Unbind(tpath);
    for (auto it = batch_path.begin(); it != batch_path.end(); ++it) {
      it->Resize({batch_size});
    }
    // create and init required tensor
    // input_exp: emissions transposed to [seq_len, batch, n_labels] so a
    // time step can be sliced contiguously.
    Tensor input_exp =
        float_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
    TransCompute<DeviceContext, T>(3, dev_ctx, *input, &input_exp, {1, 0, 2});
    auto* transition = ctx.Input<Tensor>("Transition");
    Tensor trans_exp = float_tensor_buffer.GetBufferBlock({n_labels, n_labels});
    framework::TensorCopy(*transition, curr_place, dev_ctx, &trans_exp);
    trans_exp.Resize({1, n_labels, n_labels});
    Tensor alpha = float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    Tensor zero = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    int_functor(dev_ctx, &zero, 0);
    Tensor one = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    int_functor(dev_ctx, &one, 1);
    Tensor float_one = float_tensor_buffer.GetBufferBlock({batch_size, 1});
    float_functor(dev_ctx, &float_one, static_cast<T>(1.0));
    Tensor alpha_trn_sum =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels, n_labels});
    Tensor alpha_max =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    Tensor alpha_argmax =
        int_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
    auto alpha_argmax_unbind = Unbind(alpha_argmax);
    Tensor alpha_nxt =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    Tensor int_mask = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor zero_len_mask = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor float_mask = float_tensor_buffer.GetBufferBlock({batch_size, 1});
    Tensor stop_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
    Tensor start_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
    Tensor rest_trans =
        float_tensor_buffer.GetBufferBlock({1, n_labels - 2, n_labels});
    Tensor last_ids = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor last_ids_tmp = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor batch_offset = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor gather_idx = int_tensor_buffer.GetBufferBlock({batch_size});
    // Split the transition rows into [rest | stop | start] along axis 1
    // (sizes n_labels-2, 1, 1 per the buffer blocks above).
    std::vector<const Tensor*> shape{&rest_trans, &stop_trans, &start_trans};
    std::vector<Tensor*> outputs{&rest_trans, &stop_trans, &start_trans};
    math::SplitFunctor<DeviceContext, T> split_functor;
    split_functor(dev_ctx, trans_exp, shape, 1, &outputs);
    stop_trans.Resize({1, n_labels});
    start_trans.Resize({1, n_labels});
    // Emission scores of the first time step.
    auto logit0 = input_exp.Slice(0, 1);
    logit0.Resize({batch_size, n_labels});
    BinaryOperation<DeviceContext, AddFunctor, T> AddFloat;
    BinaryOperation<DeviceContext, AddFunctor, int64_t> AddInt;
    BinaryOperation<DeviceContext, MulFunctor, T> MulFloat;
    BinaryOperation<DeviceContext, MulFunctor, int64_t> MulInt;
    BinaryOperation<DeviceContext, SubFunctor, T> SubFloat;
    BinaryOperation<DeviceContext, SubFunctor, int64_t> SubInt;
    if (include_bos_eos_tag) {
      // alpha = logit0 + start_trans, plus stop_trans for length-1 sequences.
      AddFloat(dev_ctx, logit0, start_trans, &alpha);
      GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
                                                &float_mask);
      MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
      AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
    } else {
      alpha = logit0;
    }
    SubInt(dev_ctx, left_length, one, &left_length);
    Argmax<DeviceContext, T, int64_t> argmax;
    // Forward (Viterbi) recursion over the remaining time steps.
    for (int64_t i = 1; i < max_seq_len; ++i) {
      Tensor logit = input_exp.Slice(i, i + 1);
      logit.Resize({batch_size, n_labels});
      // Broadcast-add previous alpha against every transition.
      Tensor& alpha_exp = alpha.Resize({batch_size, n_labels, 1});
      AddFloat(dev_ctx, alpha_exp, trans_exp, &alpha_trn_sum);
      auto alpha_argmax_temp = alpha_argmax_unbind[i - 1];
      alpha_argmax_temp.Resize({batch_size, n_labels});
      // Best predecessor label (and its score) for each current label.
      argmax(ctx, alpha_trn_sum, &alpha_argmax_temp, &alpha_max, 1);
      historys.emplace_back(alpha_argmax_temp);
      AddFloat(dev_ctx, alpha_max, logit, &alpha_nxt);
      alpha.Resize({batch_size, n_labels});
      // mask = paddle.cast((left_length > 0), dtype='float32')
      // alpha = mask * alpha_nxt + (1 - mask) * alpha
      GetMask<DeviceContext, GreaterThanFunctor, T>()(ctx, left_length, zero,
                                                      &float_mask);
      // alpha_nxt = mask * alpha_nxt
      MulFloat(dev_ctx, alpha_nxt, float_mask, &alpha_nxt);
      // inv_mask = 1 - mask
      SubFloat(dev_ctx, float_one, float_mask, &float_mask);
      // alpha = (1 - mask) * alpha
      MulFloat(dev_ctx, alpha, float_mask, &alpha);
      // alpha += alpha_nxt
      AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
      if (include_bos_eos_tag) {
        GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
                                                  &float_mask);
        // alpha += mask * trans_exp[:, self.stop_idx]
        MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
        AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
      }
      SubInt(dev_ctx, left_length, one, &left_length);
    }
    // Best final label per batch item; its score is the path score.
    argmax(ctx, alpha, &last_ids, scores, 1);
    left_length.Resize({batch_size});
    GetMask<DeviceContext, GreaterEqualFunctor, int64_t>()(ctx, left_length,
                                                           zero, &int_mask);
    // last_ids_update = last_ids * tag_mask
    int last_ids_index = 1;
    int actual_len = (std::min)(seq_len, static_cast<int>(max_seq_len));
    MulInt(dev_ctx, last_ids, int_mask,
           &batch_path[actual_len - last_ids_index]);
    // The algorithm below can refer to
    // https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/layers/crf.py#L438
    // batch_offset[b] = b * n_labels, used to index the flattened history.
    ARange<DeviceContext> arange;
    arange(dev_ctx, batch_offset.data<int64_t>(), batch_size, n_labels);
    Gather<DeviceContext, int64_t, int64_t> gather;
    // Backtrack through the stored argmax history, newest step first.
    for (auto hist = historys.rbegin(); hist != historys.rend(); ++hist) {
      ++last_ids_index;
      AddInt(dev_ctx, left_length, one, &left_length);
      AddInt(dev_ctx, batch_offset, last_ids, &gather_idx);
      Tensor& last_ids_update = batch_path[actual_len - last_ids_index];
      hist->Resize({batch_size * n_labels});
      gather(dev_ctx, *hist, gather_idx, &last_ids_update);
      // Keep updates only for still-active sequences (left_length > 0).
      GetMask<DeviceContext, GreaterThanFunctor, int64_t>()(ctx, left_length,
                                                            zero, &int_mask);
      MulInt(dev_ctx, last_ids_update, int_mask, &last_ids_update);
      // Sequences ending exactly here keep their final label instead.
      GetMask<DeviceContext, EqualFunctor, int64_t>()(ctx, left_length, zero,
                                                      &zero_len_mask);
      MulInt(dev_ctx, last_ids, zero_len_mask, &last_ids_tmp);
      SubInt(dev_ctx, one, zero_len_mask, &zero_len_mask);
      MulInt(dev_ctx, last_ids_update, zero_len_mask, &last_ids_update);
      AddInt(dev_ctx, last_ids_update, last_ids_tmp, &last_ids_update);
      // Not-yet-started sequences (left_length < 0) carry last_ids forward.
      GetMask<DeviceContext, LessThanFunctor, int64_t>()(ctx, left_length, zero,
                                                         &int_mask);
      MulInt(dev_ctx, last_ids, int_mask, &last_ids);
      AddInt(dev_ctx, last_ids_update, last_ids, &last_ids);
    }
    // Transpose [time, batch] back to [batch, time] into the Path output.
    TransCompute<DeviceContext, int64_t>(2, dev_ctx, tpath, path, {1, 0});
  }
};
} // namespace operators
} // namespace paddle
|
Rasterizer.c | /*
MIT License
Copyright (c) 2017 trenki2
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "Rasterizer.h"
#include "EdgeEquation.h"
#include "EdgeData.h"
#include <stdlib.h>
/* Exchange the pointees of two pointer slots. */
static inline void swap_ptrs(const void **ptr1, const void **ptr2)
{
	const void *held = *ptr2;
	*ptr2 = *ptr1;
	*ptr1 = held;
}
/* Initialize a rasterizer to defaults: span raster mode, an empty scissor
 * rectangle and no pixel shader bound.  A shader must be set via
 * Rasterizer_setPixelShader before drawing. */
void Rasterizer_construct(Rasterizer *rs)
{
	Rasterizer_setRasterMode(rs, RM_Span);
	Rasterizer_setScissorRect(rs, 0, 0, 0, 0);
	Rasterizer_setPixelShader(rs, 0);
}
/* Select the triangle rasterization strategy (RM_Span, RM_Block or
 * RM_Adaptive); consulted by Rasterizer_drawTriangleModeTemplate. */
void Rasterizer_setRasterMode(Rasterizer *rs, RasterMode mode)
{
	rs->rasterMode = mode;
}
/// Set the scissor rectangle.  Pixels pass the scissor test when
/// px is in [x, x + width) and py is in [y, y + height).
void Rasterizer_setScissorRect(Rasterizer *rs, int x, int y, int width, int height)
{
	rs->m_minX = x;
	rs->m_minY = y;
	rs->m_maxX = x + width;   /* exclusive right edge */
	rs->m_maxY = y + height;  /* exclusive bottom edge */
}
/* Bind a pixel shader and (re)install the draw function pointers.  `ps` may
 * be null at construction time, but several internal helpers (e.g.
 * Rasterizer_pixelDataFromVertex) dereference it unchecked, so a real shader
 * must be bound before drawing anything. */
void Rasterizer_setPixelShader(Rasterizer *rs, PixelShader *ps)
{
	rs->m_pixelShader = ps;
	rs->m_triangleFunc = Rasterizer_drawTriangleModeTemplate;
	rs->m_lineFunc = Rasterizer_drawLineTemplate;
	rs->m_pointFunc = Rasterizer_drawPointTemplate;
}
/* Public entry point: forwards to the point-draw function installed by
 * Rasterizer_setPixelShader. */
void Rasterizer_drawPoint(Rasterizer *rs, const RasterizerVertex *v)
{
	(*rs->m_pointFunc)(rs, v);
}
/* Public entry point: forwards to the line-draw function installed by
 * Rasterizer_setPixelShader. */
void Rasterizer_drawLine(Rasterizer *rs, const RasterizerVertex *v0, const RasterizerVertex *v1)
{
	(*rs->m_lineFunc)(rs, v0, v1);
}
/* Public entry point: forwards to the triangle-draw function installed by
 * Rasterizer_setPixelShader. */
void Rasterizer_drawTriangle(Rasterizer *rs, const RasterizerVertex *v0, const RasterizerVertex *v1, const RasterizerVertex *v2)
{
	(*rs->m_triangleFunc)(rs, v0, v1, v2);
}
/* Draw each indexed vertex as a point.  An index value of -1 marks an entry
 * to be skipped (primitive restart style). */
void Rasterizer_drawPointList(Rasterizer *rs, const RasterizerVertex *vertices, const int *indices, unsigned long indexCount)
{
	unsigned long i = 0;
	while (i < indexCount) {
		const int idx = indices[i];
		if (idx != -1)
			Rasterizer_drawPoint(rs, &vertices[idx]);
		++i;
	}
}
/* Draw indexed line segments, consuming indices two at a time.  A first
 * index of -1 skips that segment. */
void Rasterizer_drawLineList(Rasterizer *rs, const RasterizerVertex *vertices, const int *indices, unsigned long indexCount)
{
	unsigned long base;
	for (base = 0; base + 2 <= indexCount; base += 2) {
		const int first = indices[base];
		if (first == -1)
			continue;
		Rasterizer_drawLine(rs, &vertices[first], &vertices[indices[base + 1]]);
	}
}
/* Draw indexed triangles, consuming indices three at a time.  A first index
 * of -1 skips that triangle. */
void Rasterizer_drawTriangleList(Rasterizer *rs, const RasterizerVertex *vertices, const int *indices, unsigned long indexCount)
{
	unsigned long base;
	for (base = 0; base + 3 <= indexCount; base += 3) {
		const int first = indices[base];
		if (first == -1)
			continue;
		Rasterizer_drawTriangle(rs, &vertices[first], &vertices[indices[base + 1]], &vertices[indices[base + 2]]);
	}
}
/* Return true when (x, y) lies inside the scissor rectangle
 * [m_minX, m_maxX) x [m_minY, m_maxY). */
bool Rasterizer_scissorTest(Rasterizer *rs, float x, float y)
{
	const bool insideX = (x >= rs->m_minX) && (x < rs->m_maxX);
	const bool insideY = (y >= rs->m_minY) && (y < rs->m_maxY);
	return insideX && insideY;
}
/* Rasterize a single vertex as one pixel, honoring the scissor rectangle
 * and the optional drawPixel callback of the bound shader. */
void Rasterizer_drawPointTemplate(Rasterizer *rs, const RasterizerVertex *v)
{
	// Check scissor rect
	if (!Rasterizer_scissorTest(rs, v->x, v->y))
		return;
	PixelData p = Rasterizer_pixelDataFromVertex(rs, v);
	if (rs->m_pixelShader && rs->m_pixelShader->drawPixel)
		rs->m_pixelShader->drawPixel(&p);
}
/* Convert a vertex into the PixelData handed to the pixel shader.  z and
 * invw are filled in only when the shader requests the corresponding
 * interpolation; otherwise those fields are left uninitialized, so shaders
 * must not read values they did not ask for.  Note: rs->m_pixelShader is
 * dereferenced unchecked here. */
PixelData Rasterizer_pixelDataFromVertex(Rasterizer *rs, const RasterizerVertex *v)
{
	PixelData p;
	p.x = (int)v->x;
	p.y = (int)v->y;
	if (rs->m_pixelShader->InterpolateZ) p.z = v->z;
	if (rs->m_pixelShader->InterpolateW) p.invw = 1.0f / v->w;
	for (int i = 0; i < rs->m_pixelShader->AVarCount; ++i)
		p.avar[i] = v->avar[i];
	return p;
}
/* DDA-style line rasterization: step `steps` times along the dominant axis,
 * interpolating position, z/w and the shader's affine varyings.  Pixels are
 * scissor-tested individually; the final endpoint pixel is not drawn (the
 * loop emits exactly `steps` pixels starting at v0). */
void Rasterizer_drawLineTemplate(Rasterizer *rs, const RasterizerVertex *v0, const RasterizerVertex *v1)
{
	int adx = abs((int)v1->x - (int)v0->x);
	int ady = abs((int)v1->y - (int)v0->y);
	int steps = max(adx, ady);
	/* Degenerate line (both endpoints in the same pixel): nothing to draw.
	 * Returning early also avoids the division by zero that
	 * Rasterizer_computeVertexStep would perform with steps == 0; the
	 * original code silently produced inf/nan steps and drew nothing. */
	if (steps <= 0)
		return;
	RasterizerVertex step = Rasterizer_computeVertexStep(rs, v0, v1, steps);
	RasterizerVertex v = *v0;
	while (steps-- > 0)
	{
		PixelData p = Rasterizer_pixelDataFromVertex(rs, &v);
		if (Rasterizer_scissorTest(rs, v.x, v.y))
			if (rs->m_pixelShader && rs->m_pixelShader->drawPixel)
				rs->m_pixelShader->drawPixel(&p);
		Rasterizer_stepVertex(rs, &v, &step);
	}
}
/* Advance an interpolated vertex by one precomputed step increment:
 * position always, z/w and the affine varyings only when the bound shader
 * requests them. */
void Rasterizer_stepVertex(Rasterizer *rs, RasterizerVertex *v, RasterizerVertex *step)
{
	v->x += step->x;
	v->y += step->y;
	if (rs->m_pixelShader->InterpolateZ) v->z += step->z;
	if (rs->m_pixelShader->InterpolateW) v->w += step->w;
	for (int i = 0; i < rs->m_pixelShader->AVarCount; ++i)
		v->avar[i] += step->avar[i];
}
/* Compute the per-step increment for linearly interpolating from v0 to v1
 * in `adx` steps (the dominant-axis pixel count).  Precondition: adx != 0 —
 * it is the divisor for every component; callers must handle degenerate
 * lines before calling this. */
RasterizerVertex Rasterizer_computeVertexStep(Rasterizer *rs, const RasterizerVertex *v0, const RasterizerVertex *v1, int adx)
{
	RasterizerVertex step;
	step.x = (v1->x - v0->x) / adx;
	step.y = (v1->y - v0->y) / adx;
	if (rs->m_pixelShader->InterpolateZ) step.z = (v1->z - v0->z) / adx;
	if (rs->m_pixelShader->InterpolateW) step.w = (v1->w - v0->w) / adx;
	for (int i = 0; i < rs->m_pixelShader->AVarCount; ++i)
		step.avar[i] = (v1->avar[i] - v0->avar[i]) / adx;
	return step;
}
/* Block-based triangle rasterization: the clipped bounding box is tiled
 * into BlockSize x BlockSize blocks; each block is classified against the
 * three edge equations at its four corners as fully covered, partially
 * covered, or outside, and drawn accordingly.  Backfacing triangles
 * (area2 <= 0) are rejected.  Blocks are processed in parallel via OpenMP. */
void Rasterizer_drawTriangleBlockTemplate(Rasterizer *rs, const RasterizerVertex *v0, const RasterizerVertex *v1, const RasterizerVertex *v2)
{
	// Compute triangle equations.
	TriangleEquations eqn;
	TriangleEquations_construct(&eqn, v0, v1, v2, rs->m_pixelShader->AVarCount, rs->m_pixelShader->PVarCount);
	// Check if triangle is backfacing.
	if (eqn.area2 <= 0)
		return;
	// Compute triangle bounding box.
	int minX = (int)min(min(v0->x, v1->x), v2->x);
	int maxX = (int)max(max(v0->x, v1->x), v2->x);
	int minY = (int)min(min(v0->y, v1->y), v2->y);
	int maxY = (int)max(max(v0->y, v1->y), v2->y);
	// Clip to scissor rect.
	minX = max(minX, rs->m_minX);
	maxX = min(maxX, rs->m_maxX);
	minY = max(minY, rs->m_minY);
	maxY = min(maxY, rs->m_maxY);
	// Round to block grid (BlockSize is assumed to be a power of two).
	minX = minX & ~(BlockSize - 1);
	maxX = maxX & ~(BlockSize - 1);
	minY = minY & ~(BlockSize - 1);
	maxY = maxY & ~(BlockSize - 1);
	// Span (in pixels) from a block's first to its last sample.
	float s = BlockSize - 1;
	int stepsX = (maxX - minX) / BlockSize + 1;
	int stepsY = (maxY - minY) / BlockSize + 1;
	int i;
	#pragma omp parallel for
	for (i = 0; i < stepsX * stepsY; ++i)
	{
		// Flat index -> 2-D block coordinate.
		int sx = i % stepsX;
		int sy = i / stepsX;
		// Add 0.5 to sample at pixel centers.
		int x = minX + sx * BlockSize;
		int y = minY + sy * BlockSize;
		float xf = x + 0.5f;
		float yf = y + 0.5f;
		// Test if block is inside or outside triangle or touches it.
		// Evaluate the edge equations at the four block corners.
		EdgeData e00; EdgeData_init(&e00, &eqn, xf, yf);
		EdgeData e01 = e00; EdgeData_stepY2(&e01, &eqn, s);
		EdgeData e10 = e00; EdgeData_stepX2(&e10, &eqn, s);
		EdgeData e11 = e01; EdgeData_stepX2(&e11, &eqn, s);
		bool e00_0 = EdgeEquation_testValue(&eqn.e0, e00.ev0), e00_1 = EdgeEquation_testValue(&eqn.e1, e00.ev1), e00_2 = EdgeEquation_testValue(&eqn.e2, e00.ev2), e00_all = e00_0 && e00_1 && e00_2;
		bool e01_0 = EdgeEquation_testValue(&eqn.e0, e01.ev0), e01_1 = EdgeEquation_testValue(&eqn.e1, e01.ev1), e01_2 = EdgeEquation_testValue(&eqn.e2, e01.ev2), e01_all = e01_0 && e01_1 && e01_2;
		bool e10_0 = EdgeEquation_testValue(&eqn.e0, e10.ev0), e10_1 = EdgeEquation_testValue(&eqn.e1, e10.ev1), e10_2 = EdgeEquation_testValue(&eqn.e2, e10.ev2), e10_all = e10_0 && e10_1 && e10_2;
		bool e11_0 = EdgeEquation_testValue(&eqn.e0, e11.ev0), e11_1 = EdgeEquation_testValue(&eqn.e1, e11.ev1), e11_2 = EdgeEquation_testValue(&eqn.e2, e11.ev2), e11_all = e11_0 && e11_1 && e11_2;
		// Number of corners fully inside the triangle.
		int result = e00_all + e01_all + e10_all + e11_all;
		// Potentially all out.
		if (result == 0)
		{
			// Test for special case.
			// NOTE(review): `a == b == c` parses as `(a == b) == c`, which is
			// not "all three equal".  This matches the upstream reference
			// implementation, but confirm the intended semantics before
			// relying on this skip for correctness.
			bool e00Same = e00_0 == e00_1 == e00_2;
			bool e01Same = e01_0 == e01_1 == e01_2;
			bool e10Same = e10_0 == e10_1 == e10_2;
			bool e11Same = e11_0 == e11_1 == e11_2;
			if (!e00Same || !e01Same || !e10Same || !e11Same)
				PixelShader_drawBlock(rs->m_pixelShader, &eqn, x, y, true);
		}
		else if (result == 4)
		{
			// Fully Covered.
			PixelShader_drawBlock(rs->m_pixelShader, &eqn, x, y, false);
		}
		else
		{
			// Partially Covered.
			PixelShader_drawBlock(rs->m_pixelShader, &eqn, x, y, true);
		}
	}
}
/* Scanline (span) triangle rasterization.  The vertices are sorted top to
 * bottom; flat-top and flat-bottom cases are handled directly, and the
 * general case is split at the middle vertex's height into a flat-bottom
 * plus a flat-top triangle.  Backfacing triangles (area2 <= 0) are
 * rejected. */
void Rasterizer_drawTriangleSpanTemplate(Rasterizer *rs, const RasterizerVertex *v0, const RasterizerVertex *v1, const RasterizerVertex *v2)
{
	// Compute triangle equations.
	TriangleEquations eqn;
	TriangleEquations_construct(&eqn, v0, v1, v2, rs->m_pixelShader->AVarCount, rs->m_pixelShader->PVarCount);
	// Check if triangle is backfacing.
	if (eqn.area2 <= 0)
		return;
	// t/m/b = top/middle/bottom vertex after sorting by y.
	const RasterizerVertex *t = v0;
	const RasterizerVertex *m = v1;
	const RasterizerVertex *b = v2;
	// Sort vertices from top to bottom.
	if (t->y > m->y) swap_ptrs(&t, &m);
	if (m->y > b->y) swap_ptrs(&m, &b);
	if (t->y > m->y) swap_ptrs(&t, &m);
	float dy = (b->y - t->y);
	float iy = (m->y - t->y);
	if (m->y == t->y)
	{
		// Flat-top triangle.
		const RasterizerVertex *l = m, *r = t;
		if (l->x > r->x) swap_ptrs(&l, &r);
		Rasterizer_drawTopFlatTriangle(rs, &eqn, l, r, b);
	}
	else if (m->y == b->y)
	{
		// Flat-bottom triangle.
		const RasterizerVertex *l = m, *r = b;
		if (l->x > r->x) swap_ptrs(&l, &r);
		Rasterizer_drawBottomFlatTriangle(rs, &eqn, t, l, r);
	}
	else
	{
		// General case: synthesize v4 on the long edge at the middle
		// vertex's height, then draw the two flat halves.
		RasterizerVertex v4;
		v4.y = m->y;
		v4.x = t->x + ((b->x - t->x) / dy) * iy;
		if (rs->m_pixelShader->InterpolateZ) v4.z = t->z + ((b->z - t->z) / dy) * iy;
		if (rs->m_pixelShader->InterpolateW) v4.w = t->w + ((b->w - t->w) / dy) * iy;
		for (int i = 0; i < rs->m_pixelShader->AVarCount; ++i)
			v4.avar[i] = t->avar[i] + ((b->avar[i] - t->avar[i]) / dy) * iy;
		const RasterizerVertex *l = m, *r = &v4;
		if (l->x > r->x) swap_ptrs(&l, &r);
		Rasterizer_drawBottomFlatTriangle(rs, &eqn, t, l, r);
		Rasterizer_drawTopFlatTriangle(rs, &eqn, l, r, b);
	}
}
/* Rasterize a flat-bottom triangle: apex v0 on top, v1/v2 the left/right
 * bottom vertices.  Each scanline's span endpoints are recomputed from the
 * edge inverse slopes (rather than accumulated) so the loop iterations are
 * independent and can run in parallel; spans are clipped to the scissor
 * rectangle in x. */
void Rasterizer_drawBottomFlatTriangle(Rasterizer *rs, const TriangleEquations *eqn, const RasterizerVertex *v0, const RasterizerVertex *v1, const RasterizerVertex *v2)
{
	float invslope1 = (v1->x - v0->x) / (v1->y - v0->y);
	float invslope2 = (v2->x - v0->x) / (v2->y - v0->y);
	//float curx1 = v0.x;
	//float curx2 = v0.x;
	int scanlineY;
	#pragma omp parallel for
	for (scanlineY = (int)(v0->y + 0.5f); scanlineY < (int)(v1->y + 0.5f); scanlineY++)
	{
		// Recompute both span ends for this scanline (iteration-independent).
		float dy = (scanlineY - v0->y) + 0.5f;
		float curx1 = v0->x + invslope1 * dy + 0.5f;
		float curx2 = v0->x + invslope2 * dy + 0.5f;
		// Clip to scissor rect
		int xl = max(rs->m_minX, (int)curx1);
		int xr = min(rs->m_maxX, (int)curx2);
		PixelShader_drawSpan(rs->m_pixelShader, eqn, xl, scanlineY, xr);
		// curx1 += invslope1;
		// curx2 += invslope2;
	}
}
/* Rasterize a flat-top triangle: v0/v1 the left/right top vertices, apex v2
 * at the bottom.  Scanlines are walked bottom-up; as in the flat-bottom
 * case, span endpoints are recomputed per scanline so the OpenMP loop
 * iterations are independent, and spans are scissor-clipped in x. */
void Rasterizer_drawTopFlatTriangle(Rasterizer *rs, const TriangleEquations *eqn, const RasterizerVertex *v0, const RasterizerVertex *v1, const RasterizerVertex *v2)
{
	float invslope1 = (v2->x - v0->x) / (v2->y - v0->y);
	float invslope2 = (v2->x - v1->x) / (v2->y - v1->y);
	// float curx1 = v2.x;
	// float curx2 = v2.x;
	int scanlineY;
	#pragma omp parallel for
	for (scanlineY = (int)(v2->y - 0.5f); scanlineY > (int)(v0->y - 0.5f); scanlineY--)
	{
		// Recompute both span ends for this scanline (iteration-independent).
		float dy = (scanlineY - v2->y) + 0.5f;
		float curx1 = v2->x + invslope1 * dy + 0.5f;
		float curx2 = v2->x + invslope2 * dy + 0.5f;
		// Clip to scissor rect
		int xl = max(rs->m_minX, (int)curx1);
		int xr = min(rs->m_maxX, (int)curx2);
		PixelShader_drawSpan(rs->m_pixelShader, eqn, xl, scanlineY, xr);
		// curx1 -= invslope1;
		// curx2 -= invslope2;
	}
}
/* Choose between block- and span-based rasterization from the triangle's
 * bounding-box aspect ratio: roughly square triangles go to the block
 * rasterizer, elongated ones to the span rasterizer. */
void Rasterizer_drawTriangleAdaptiveTemplate(Rasterizer *rs, const RasterizerVertex *v0, const RasterizerVertex *v1, const RasterizerVertex *v2)
{
	// Compute triangle bounding box.
	const float minX = (float)min(min(v0->x, v1->x), v2->x);
	const float maxX = (float)max(max(v0->x, v1->x), v2->x);
	const float minY = (float)min(min(v0->y, v1->y), v2->y);
	const float maxY = (float)max(max(v0->y, v1->y), v2->y);
	const float orient = (maxX - minX) / (maxY - minY);
	const bool roughlySquare = (orient > 0.4 && orient < 1.6);
	if (roughlySquare)
		Rasterizer_drawTriangleBlockTemplate(rs, v0, v1, v2);
	else
		Rasterizer_drawTriangleSpanTemplate(rs, v0, v1, v2);
}
/* Dispatch a triangle to the rasterizer selected by rs->rasterMode.
 * Unknown modes draw nothing, matching the original switch with no
 * default case. */
void Rasterizer_drawTriangleModeTemplate(Rasterizer *rs, const RasterizerVertex *v0, const RasterizerVertex *v1, const RasterizerVertex *v2)
{
	const RasterMode mode = rs->rasterMode;
	if (mode == RM_Span)
		Rasterizer_drawTriangleSpanTemplate(rs, v0, v1, v2);
	else if (mode == RM_Block)
		Rasterizer_drawTriangleBlockTemplate(rs, v0, v1, v2);
	else if (mode == RM_Adaptive)
		Rasterizer_drawTriangleAdaptiveTemplate(rs, v0, v1, v2);
}
GB_unaryop__lnot_uint8_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_uint32
// op(A') function: GB_tran__lnot_uint8_uint32
// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = !(((uint8_t) Ax [p]) != 0) to every one of the anz entries,
// i.e. cast uint32_t -> uint8_t and take the logical NOT.  The loop is
// statically scheduled across nthreads OpenMP threads.  Returns GrB_NO_VALUE
// when this operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_uint8_uint32
(
    uint8_t *restrict Cx,        // output array, anz entries
    const uint32_t *restrict Ax, // input array, anz entries
    int64_t anz,                 // number of entries
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;  // Cx [p] = !(((uint8_t) Ax [p]) != 0)
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, cast uint32_t -> uint8_t, and apply the
// logical NOT, writing the result into C.  The actual work is performed by
// the generic template included from GB_unaryop_transpose.c, driven by the
// GB_* macros defined above; Rowcounts, Iter, A_slice and naslice carry the
// precomputed parallel slicing of A (see GB_unaryop_transpose.c for their
// exact contract).  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_tran__lnot_uint8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
sequential-matix-vector-multiplication.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h> // This is the OpenMP API
void mxv(int m, int n, double * restrict a,
double * restrict b, double * restrict c);
/* Read m and n, allocate a (m), b (m*n, row-major) and c (n), initialize
 * b and c, and run the matrix-vector product a = b * c.
 * Fixes vs. the original:
 *  - `#pragma omp parallel for(defualt)` is not valid OpenMP clause syntax
 *    (clauses are e.g. `default(shared)`); replaced with a plain
 *    `parallel for` — the loop index is implicitly private.
 *  - a failed malloc only called perror() and then fell through to use the
 *    NULL pointer; now the program exits instead.
 *  - the scanf return value is checked. */
int main(int argc, char *argv[])
{
   double *a,*b,*c;
   int i, j, m, n;

   printf("Please give m and n: ");
   if (scanf("%d %d",&m,&n) != 2) {
      fprintf(stderr, "failed to read m and n\n");
      return(1);
   }

   // Allocation of Variables
   if ( (a=(double *)malloc(m*sizeof(double))) == NULL ) {
      perror("memory allocation for a");
      return(1);
   }
   if ( (b=(double *)malloc(m*n*sizeof(double))) == NULL ) {
      perror("memory allocation for b");
      return(1);
   }
   if ( (c=(double *)malloc(n*sizeof(double))) == NULL ) {
      perror("memory allocation for c");
      return(1);
   }

   // Initialization of matrix B and vector C
   printf("Initializing matrix B and vector c\n");
   //Vector: j is the parallel loop's index and therefore implicitly private.
   #pragma omp parallel for
   for (j=0; j<n; j++)
      c[j] = 2.0;
   //Matrix: rows sequential, columns of each row filled in parallel.
   for (i=0; i<m; i++) {
      #pragma omp parallel for
      for (j=0; j<n; j++)
         b[i*n+j] = i;
   }

   printf("Executing mxv function for m = %d n = %d\n",m,n);
   (void) mxv(m, n, a, b, c);

   free(a);free(b);free(c);
   return(0);
}
/* PRE-CONDITION:  a has length m, b is an m-by-n row-major matrix and c has
   length n.
   POST-CONDITION: a[i] = dot(row i of b, c) for every i in [0, m).
   Fixes vs. the original:
    - `#pragma omp parallel for(defualt)` is invalid OpenMP clause syntax and
      the pragma preceded the `int i, j;` declaration, which is ill-formed
      (the directive must immediately precede the for statement); the
      declarations now come first and the clauses are spelled correctly.
    - `restrict` is spelled `__restrict` (GNU/Clang/MSVC extension valid in
      both C and C++); parameter qualifiers do not affect the function's
      type, so callers and the header prototype are unaffected. */
void mxv(int m, int n, double * __restrict a,
         double * __restrict b, double * __restrict c)
{
   int i, j;

   /* Parallelize over rows: iterations of i are independent (each writes a
      disjoint a[i]); j must be private, everything else is shared. */
   #pragma omp parallel for default(none) shared(a, b, c, m, n) private(j)
   for (i=0; i<m; i++)
   {
      a[i] = 0.0;
      for (j=0; j<n; j++)
         a[i] += b[i*n+j]*c[j];
   }
}
|
gt_mm.c | /*
* PROJECT: GEM-Tools library
* FILE: gt_mm.c
* DATE: 01/02/2013
* AUTHOR(S): Santiago Marco-Sola <santiagomsola@gmail.com>
* DESCRIPTION:
* Memory Manager provides memory allocation functions. Different types of memory are supported.
* - UnitMemory
* Allocate relative small chunks of memory relying on the regular memory manager,
* usually malloc/calloc using a BuddySystem (Helper functions)
* - BulkMemory
* Allocate big chunks of memory and resort to disk if memory is not enough
* - SlabMemory
* Relative big amounts of objects allocated all at once (like the LINUX slab allocator)
* Objects of a certain type are ready to go inside the slab, thus reducing
* the overhead of malloc/setup/free cycles along the program
* - PoolMemory
* Pool of Slabs as gather all slabs needed along a program
* The goal is to minimize all memory malloc/setup/free overhead
* Offers thread safe allocation of slabs as to balance memory consumption across threads
*/
// TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO
// 1.- TCMalloc : Thread-Caching Malloc
// 2.- nedmalloc()
// 4.- madvise() / readahead() / posix_fadvise()
// TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO
#include "gt_mm.h"
// In some environments MAP_HUGETLB can be undefined
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS 0 // TODO: disable for mac compatibility
#endif
#ifndef MAP_POPULATE
#define MAP_POPULATE 0 // TODO: disable for mac compatibility
#endif
/*
* Memory Alignment Utils
*/
const uint64_t gt_mm_mem_alignment_bits_mask[] = { // Check Memory Alignment Bits (Masks)
0x0000000000000001lu, /* 16 bits aligned ( 2B / 2^4) */
0x0000000000000003lu, /* 32 bits aligned ( 4B / 2^5) */
0x0000000000000007lu, /* 64 bits aligned ( 8B / 2^6) */
0x000000000000000Flu, /* 128 bits aligned ( 16B / 2^7) */
0x000000000000001Flu, /* 256 bits aligned ( 32B / 2^8) */
0x000000000000003Flu, /* 512 bits aligned ( 64B / 2^9) */
0x000000000000007Flu, /* 1024 bits aligned ( 1KB / 2^10) */
0x00000000000000FFlu, /* 2048 bits aligned ( 2KB / 2^11) */
0x00000000000001FFlu, /* 4096 bits aligned ( 4KB / 2^12) RegularPage Size*/
0x00000000000003FFlu, /* 8192 bits aligned ( 8KB / 2^13) */
0x00000000000007FFlu, /* 16384 bits aligned (16KB / 2^14) */
0x0000000000000FFFlu, /* 32768 bits aligned (32KB / 2^15) */
0x000000000003FFFFlu, /* n/a bits aligned ( 2MB / 2^21) RegularPageHugeTLB Size */
0x000000000007FFFFlu, /* n/a bits aligned ( 4MB / 2^21) */
};
/*
* MMap Constants/Values
*/
int gt_mm_proc_flags[3] = { PROT_READ, PROT_READ|PROT_WRITE, PROT_READ|PROT_WRITE };
int gt_mm_mmap_mode[3] = { MAP_PRIVATE, MAP_SHARED, MAP_SHARED };
/*
* Temporal folder path
*/
char* gt_mm_temp_folder_path = GT_MM_DEFAULT_TMP_FOLDER;
/* Return the folder currently used to back disk-based (mmap) allocations. */
GT_INLINE char* gt_mm_get_tmp_folder() {
  return gt_mm_temp_folder_path;
}
/* Override the temporal folder used for disk-backed allocations.  The
 * string is not copied: the caller retains ownership and must keep it alive
 * for as long as the memory manager may use it. */
GT_INLINE void gt_mm_set_tmp_folder(char* const tmp_folder_path) {
  GT_NULL_CHECK(tmp_folder_path);
  gt_mm_temp_folder_path = tmp_folder_path;
}
/*
* UnitMemory
* Allocate relative small chunks of memory relying on the regular memory manager,
* usually malloc/calloc using a BuddySystem (Helper functions)
*/
/* UnitMemory allocation of num_elements*size_element bytes.
 * If init_mem && init_value==0 the memory comes zeroed from calloc;
 * if init_value!=0 it is malloc'ed and memset to init_value.
 * Allocation failure is fatal (gt_cond_fatal_error does not return).
 * NOTE(review): num_elements*size_element can overflow uint64_t unchecked
 * on the malloc path (calloc checks internally) — callers are trusted to
 * pass sane sizes; confirm before using with untrusted inputs. */
GT_INLINE void* gt_malloc_(uint64_t const num_elements,const uint64_t size_element,const bool init_mem,const int init_value) {
  const uint64_t total_memory = num_elements*size_element;
  void* allocated_mem;
  if (gt_expect_false(init_mem && init_value==0)) {
    allocated_mem = calloc(num_elements,size_element);
    gt_cond_fatal_error(!allocated_mem,MEM_CALLOC_INFO,num_elements,size_element);
  } else {
    allocated_mem = malloc(total_memory);
    gt_cond_fatal_error(!allocated_mem,MEM_ALLOC_INFO,total_memory);
  }
  if (gt_expect_false(init_mem && init_value!=0)) memset(allocated_mem,init_value,total_memory);
  //GT_MM_PRINT_MEM_ALIGMENT(allocated_mem); // Debug
  return allocated_mem;
}
/* Like gt_malloc_(), but returns NULL on allocation failure instead of
 * aborting: calloc when zero-initialization is requested, otherwise malloc
 * followed by an optional memset to init_value. */
GT_INLINE void* gt_malloc_nothrow(uint64_t const num_elements,const uint64_t size_element,const bool init_mem,const int init_value) {
  const uint64_t total_memory = num_elements*size_element;
  const bool zero_filled = init_mem && init_value==0;
  void* const allocated_mem = gt_expect_false(zero_filled) ?
      calloc(num_elements,size_element) : malloc(total_memory);
  if (allocated_mem==NULL) return NULL;
  if (gt_expect_false(init_mem && init_value!=0)) {
    memset(allocated_mem,init_value,total_memory);
  }
  //GT_MM_PRINT_MEM_ALIGMENT(allocated_mem); // Debug
  return allocated_mem;
}
/* Release memory obtained from gt_malloc_()/gt_malloc_nothrow(). */
GT_INLINE void gt_free(void* mem_addr) {
  free(mem_addr);
}
/*
* BulkMemory
* Allocate big chunks of memory and resort to disk if memory is not enough
*/
/* BulkMemory allocation: try the heap first; if the request does not fit,
 * warn and fall back to an mmap-ed temporary file on disk
 * (gt_mm_bulk_mmalloc_temp).  The returned handler is always read-write
 * with the cursor positioned at the start of the block. */
GT_INLINE gt_mm* gt_mm_bulk_malloc(const uint64_t num_bytes,const bool init_mem) {
  GT_ZERO_CHECK(num_bytes);
  void* memory = gt_malloc_nothrow(num_bytes,1,init_mem,0);
  if (gt_expect_true(memory!=NULL)) { // Fits in HEAP
    gt_mm* const mm = gt_alloc(gt_mm);
    mm->memory = memory;
    mm->mem_type = GT_MM_HEAP;
    mm->mode = GT_MM_READ_WRITE;
    mm->allocated = num_bytes;
    mm->cursor = mm->memory;
    GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
    return mm;
  } else { // Resort to MMAP in disk
    gt_warn(MEM_ALLOC_DISK,num_bytes);
    return gt_mm_bulk_mmalloc_temp(num_bytes);
  }
}
/* BulkMemory allocation via an anonymous private mmap (optionally backed by
 * huge pages).  The mapping is zero-initialized by the kernel and lives in
 * RAM+swap; failure is fatal.  The returned handler has no backing file
 * (fd == -1, file_name == NULL). */
GT_INLINE gt_mm* gt_mm_bulk_mmalloc(const uint64_t num_bytes,const bool use_huge_pages) {
  GT_ZERO_CHECK(num_bytes);
  // Allocate handler
  gt_mm* const mm = gt_alloc(gt_mm);
  /*
   * MMap memory (anonymous)
   *   - MAP_PRIVATE => Fits in RAM+SWAP
   *   - MAP_ANONYMOUS => The mapping is not backed by any file; its contents are initialized to zero.
   *       Map against /dev/zero (Allocate anonymous memory segment, without open)
   *   - MAP_NORESERVE to explicitly enable swap space overcommitting. (echo 1 > /proc/sys/vm/overcommit_memory)
   *       Useful when you wish to map a file larger than the amount of free memory
   *       available on your system (RAM+SWAP).
   *       In this case, the lazy swap space reservation may cause the program
   *       to consume all the free RAM and swap on the system, eventually
   *       triggering the OOM killer (Linux) or causing a SIGSEGV.
   */
  int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
  if (use_huge_pages) flags |= MAP_HUGETLB;
  mm->memory = mmap(0,num_bytes,PROT_READ|PROT_WRITE,flags,-1,0);
  gt_cond_fatal_error__perror(mm->memory==MAP_FAILED,MEM_ALLOC_MMAP_FAIL,num_bytes);
  mm->cursor = mm->memory;
  // Set MM (anonymous mapping: no file descriptor or path to remember)
  mm->mem_type = GT_MM_MMAPPED;
  mm->mode = GT_MM_READ_WRITE;
  mm->allocated = num_bytes;
  mm->fd = -1;
  mm->file_name = NULL;
  // GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
  return mm;
}
// Maps an existing file into memory. The file descriptor stays open in mm->fd and
// is closed by gt_mm_free(); mm->file_name keeps a heap copy of the path.
GT_INLINE gt_mm* gt_mm_bulk_mmap_file(char* const file_name,const gt_mm_mode mode,const bool populate_page_tables) {
  GT_NULL_CHECK(file_name);
  // Allocate handler
  gt_mm* const mm = gt_alloc(gt_mm);
  // Retrieve input file info (mapping length is the current file size)
  struct stat stat_info;
  gt_cond_fatal_error__perror(stat(file_name,&stat_info)==-1,FILE_STAT,file_name);
  // Open file descriptor
  mm->fd = open(file_name,gt_fm_oflags[mode],S_IRUSR);
  gt_cond_fatal_error__perror(mm->fd==-1,FILE_OPEN,file_name);
  /*
   * Mmap file
   *   - @mode::
   *       GT_MM_READ_ONLY => MAP_PRIVATE (no copy-on-write as it's not allowed)
   *       GT_MM_WRITE_ONLY or GT_MM_READ_WRITE => MAP_SHARED
   *   - MAP_POPULATE (since Linux 2.5.46)
   *       Populate (prefault) page tables for a mapping. For a file mapping, this causes
   *       read-ahead on the file. Later accesses to the mapping will not be blocked by page faults.
   *       MAP_POPULATE is only supported for private mappings since Linux 2.6.23.
   */
  int flags = gt_mm_mmap_mode[mode];
  if (populate_page_tables) flags |= MAP_POPULATE;
  mm->memory = mmap(0,stat_info.st_size,gt_mm_proc_flags[mode],flags,mm->fd,0);
  gt_cond_fatal_error__perror(mm->memory==MAP_FAILED,SYS_MMAP_FILE,file_name);
  mm->cursor = mm->memory;
  // Set MM
  mm->mem_type = GT_MM_MMAPPED;
  mm->mode = mode;
  mm->allocated = stat_info.st_size;
  mm->file_name = gt_strndup(file_name,gt_strlen(file_name)); // released by gt_mm_free()
  // GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
  return mm;
}
GT_INLINE gt_mm* gt_mm_bulk_mmalloc_temp(const uint64_t num_bytes) {
  // Allocates num_bytes backed by an unlinked temporary file mapped shared, so the
  // kernel can always reclaim RAM by writing the pages back to disk.
  GT_ZERO_CHECK(num_bytes);
  // Allocate handler
  gt_mm* const mm = gt_alloc(gt_mm);
  // TemporalMemory (backed by a file).
  // FIX: the template "gt_mmalloc_temp_XXXXXX" is 22 characters, so the buffer needs
  // strlen(folder)+22+1 bytes; the previous '+22' left no room for the terminating
  // NUL written by sprintf (one-byte heap overflow).
  mm->file_name = gt_calloc(strlen(gt_mm_get_tmp_folder())+23,char,true);
  sprintf(mm->file_name,"%sgt_mmalloc_temp_XXXXXX",gt_mm_get_tmp_folder());
  // Create temporary file
  mm->fd = mkstemp(mm->file_name);
  gt_cond_fatal_error__perror(mm->fd==-1,SYS_MKSTEMP,mm->file_name);
  gt_cond_fatal_error__perror(unlink(mm->file_name),SYS_HANDLE_TMP); // Make it temporary
  // Set the size of the temporary file (disk allocation): seek to the last byte
  // and write one byte so the file really occupies num_bytes on disk.
  gt_cond_fatal_error__perror(lseek(mm->fd,num_bytes-1,SEEK_SET)==-1,SYS_HANDLE_TMP);
  gt_cond_fatal_error__perror(write(mm->fd,"",1)<=0,SYS_HANDLE_TMP);
  gt_cond_fatal_error__perror(lseek(mm->fd,0,SEEK_SET)==-1,SYS_HANDLE_TMP);
  /*
   * Mmap file.
   *   - MAP_SHARED as the mapping will be reflected on disk (no copy-on-write)
   *     As such, the kernel knows it can always free up memory by doing writeback.
   *   - MAP_NORESERVE to explicitly enable swap space overcommitting.
   *     (echo 1 > /proc/sys/vm/overcommit_memory) Useful when you wish to map a file
   *     larger than the amount of free memory available on your system (RAM+SWAP).
   *     In this case, the lazy swap space reservation may cause the program to consume
   *     all the free RAM and swap, eventually triggering the OOM killer (Linux) or a SIGSEGV.
   */
  mm->memory = mmap(NULL,num_bytes,PROT_READ|PROT_WRITE,MAP_SHARED|MAP_NORESERVE,mm->fd,0);
  gt_cond_fatal_error__perror(mm->memory==MAP_FAILED,MEM_ALLOC_MMAP_DISK_FAIL,num_bytes,mm->file_name);
  mm->cursor = mm->memory;
  // Set MM
  mm->mem_type = GT_MM_MMAPPED;
  mm->mode = GT_MM_READ_WRITE;
  mm->allocated = num_bytes;
  // GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
  return mm;
}
// Resizes the bulk memory chunk in place. NOT IMPLEMENTED: always aborts with
// NOT_IMPLEMENTED. The commented sketch below outlines the intended behavior.
GT_INLINE void gt_mm_realloc(gt_mm* const mm,const uint64_t num_bytes) {
  GT_MM_CHECK(mm);
  gt_fatal_error(NOT_IMPLEMENTED); // TODO
  // Intended implementation (kept for reference):
  //  const uint64_t current_cursor_pos = gt_mm_get_current_position(mm);
  //  if (mm->mem_type==GT_MM_HEAP) { // Heap BulkMemory
  //    if (num_bytes > mm->allocated) {
  //      mm->memory = realloc(mm->memory);
  //      gt_cond_fatal_error(mm->memory==NULL,MEM_REALLOC);
  //      mm->cursor = mm->memory + current_cursor_pos;
  //      mm->allocated = num_bytes;
  //    }
  //  } else { // MMapped BulkMemory
  //    if (mm->fd!=-1) {
  //      if (mm->tmp_file) { // TemporalMemory
  //        mremap(mm->memory,mm->allocated,num_bytes,MREMAP_MAYMOVE);
  //      } else { // File mapped
  //
  //      }
  //    } else { // Anonymous
  //
  //    }
  //  }
}
// Releases the bulk memory (heap free or munmap + close of the backing fd)
// and the handler itself.
GT_INLINE void gt_mm_free(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  if (mm->mem_type==GT_MM_HEAP) { // Heap BulkMemory
    gt_free(mm->memory);
  } else { // MMapped BulkMemory
    gt_cond_fatal_error__perror(munmap(mm->memory,mm->allocated)==-1,SYS_UNMAP);
    if (mm->fd!=-1) {
      gt_cond_fatal_error__perror(close(mm->fd),SYS_HANDLE_TMP);
    }
  }
  // NOTE(review): assumes every constructor sets mm->file_name (NULL or heap copy) —
  // confirm; an uninitialized pointer here would be freed.
  gt_cfree(mm->file_name);
  gt_free(mm);
}
GT_INLINE gt_mm* gt_mm_bulk_load_file(char* const file_name,const uint64_t num_threads) {
  // Reads a whole file into a heap buffer and returns a read-only bulk-memory handler.
  GT_NULL_CHECK(file_name);
  // Allocate handler
  gt_mm* const mm = gt_alloc(gt_mm);
  // Retrieve input file info
  struct stat stat_info;
  gt_cond_fatal_error__perror(stat(file_name,&stat_info)==-1,FILE_STAT,file_name);
  // Allocate memory to dump the content of the file
  mm->memory = gt_malloc(stat_info.st_size);
  gt_cond_fatal_error(!mm->memory,MEM_ALLOC_INFO,stat_info.st_size);
  mm->mem_type = GT_MM_HEAP;
  mm->mode = GT_MM_READ_ONLY;
  mm->allocated = stat_info.st_size;
  mm->cursor = mm->memory;
  mm->fd = -1;          // FIX: previously left uninitialized
  mm->file_name = NULL; // FIX: gt_mm_free() calls gt_cfree(mm->file_name) unconditionally
  GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
  // Read the file and dump it into memory (parallel read only when worthwhile)
  if (num_threads>1 && (stat_info.st_size > num_threads*8)) {
    gt_fm_bulk_read_file_parallel(file_name,mm->memory,0,0,num_threads);
  } else {
    gt_fm_bulk_read_file(file_name,mm->memory,0,0);
  }
  return mm;
}
GT_INLINE gt_mm* gt_mm_bulk_mload_file(char* const file_name,const uint64_t num_threads) {
  // Dumps the whole file into an anonymous memory mapping and returns its handler.
  GT_NULL_CHECK(file_name);
  // Query the file size
  struct stat file_info;
  gt_cond_fatal_error__perror(stat(file_name,&file_info)==-1,FILE_STAT,file_name);
  // Reserve an anonymous mapping large enough for the whole file
  gt_mm* const mm = gt_mm_bulk_mmalloc(file_info.st_size,false);
  // Load the file content (parallel read only when worthwhile)
  const bool parallel_read = (num_threads>1) && (file_info.st_size > num_threads*8);
  if (parallel_read) {
    gt_fm_bulk_read_file_parallel(file_name,mm->memory,0,0,num_threads);
  } else {
    gt_fm_bulk_read_file(file_name,mm->memory,0,0);
  }
  return mm;
}
/*
* Accessors
*/
// Returns the current cursor position (next byte to read/write)
GT_INLINE void* gt_mm_get_mem(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return mm->cursor;
}
// Returns the base address of the whole allocated segment
GT_INLINE void* gt_mm_get_base_mem(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return mm->memory;
}
// Returns the access mode (read-only / write-only / read-write)
GT_INLINE gt_mm_mode gt_mm_get_mode(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return mm->mode;
}
// Changes the access mode. NOT IMPLEMENTED: always aborts.
GT_INLINE void gt_mm_set_mode(gt_mm* const mm,const gt_mm_mode mode) {
  GT_MM_CHECK(mm);
  gt_fatal_error(NOT_IMPLEMENTED); // TODO
}
/*
* Seek functions
*/
// Returns the cursor offset (in bytes) from the base of the segment
GT_INLINE uint64_t gt_mm_get_current_position(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return (mm->cursor-mm->memory);
}
// End-of-memory test: true once the cursor has reached/passed the allocated size
GT_INLINE bool gt_mm_eom(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return gt_mm_get_current_position(mm) >= mm->allocated;
}
// Moves the cursor to an absolute byte offset (fatal if offset is out of range;
// note that seeking to exactly mm->allocated is also rejected)
GT_INLINE void gt_mm_seek(gt_mm* const mm,const uint64_t byte_position) {
  GT_MM_CHECK(mm);
  gt_fatal_check(byte_position>=mm->allocated,MEM_CURSOR_SEEK,byte_position);
  mm->cursor = mm->memory + byte_position;
}
// Advances the cursor num_bytes (segment bounds checked by GT_MM_CHECK_SEGMENT)
GT_INLINE void gt_mm_skip_forward(gt_mm* const mm,const uint64_t num_bytes) {
  GT_MM_CHECK(mm);
  mm->cursor += num_bytes;
  GT_MM_CHECK_SEGMENT(mm);
}
// Moves the cursor back num_bytes
GT_INLINE void gt_mm_skip_backward(gt_mm* const mm,const uint64_t num_bytes) {
  GT_MM_CHECK(mm);
  mm->cursor -= num_bytes;
  GT_MM_CHECK_SEGMENT(mm);
}
// Skips over a serialized 64-bit value (8 bytes)
GT_INLINE void gt_mm_skip_uint64(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor += 8;
  GT_MM_CHECK_SEGMENT(mm);
}
// Skips over a serialized 32-bit value (4 bytes)
GT_INLINE void gt_mm_skip_uint32(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor += 4;
  GT_MM_CHECK_SEGMENT(mm);
}
// Skips over a serialized 16-bit value (2 bytes)
GT_INLINE void gt_mm_skip_uint16(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor += 2;
  GT_MM_CHECK_SEGMENT(mm);
}
// Skips over a serialized 8-bit value (1 byte)
GT_INLINE void gt_mm_skip_uint8(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor += 1;
  GT_MM_CHECK_SEGMENT(mm);
}
GT_INLINE void gt_mm_skip_align(gt_mm* const mm,const uint64_t num_bytes) {
  // Advances the cursor to the next address that is a multiple of num_bytes
  // (no-op when the cursor is already aligned, or when num_bytes <= 1).
  GT_MM_CHECK(mm);
  GT_ZERO_CHECK(num_bytes);
  if (gt_expect_true(num_bytes > 1)) {
    // Round up: add (num_bytes-1), then drop the remainder modulo num_bytes.
    // FIX: the modulus must be num_bytes itself; the previous code reduced modulo
    // (num_bytes-1), which does not yield a num_bytes-aligned address.
    mm->cursor = mm->cursor+(num_bytes-1);
    mm->cursor = mm->cursor-(GT_MM_CAST_ADDR(mm->cursor)%num_bytes);
    GT_MM_CHECK_SEGMENT(mm);
    gt_fatal_check(GT_MM_CAST_ADDR(mm->cursor)%num_bytes!=0,MEM_ALG_FAILED);
  }
}
// Rounds the cursor up to the next 16-byte boundary (power-of-two mask round-up)
GT_INLINE void gt_mm_skip_align_16(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor = GT_MM_CAST_PTR(
      (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_16b) & (~GT_MM_MEM_ALIGNED_MASK_16b));
  GT_MM_CHECK_SEGMENT(mm);
  GT_MM_CHECK_ALIGNMENT(mm,16b);
}
// Rounds the cursor up to the next 32-byte boundary
GT_INLINE void gt_mm_skip_align_32(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor = GT_MM_CAST_PTR(
      (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_32b) & (~GT_MM_MEM_ALIGNED_MASK_32b));
  GT_MM_CHECK_SEGMENT(mm);
  GT_MM_CHECK_ALIGNMENT(mm,32b);
}
// Rounds the cursor up to the next 64-byte boundary
GT_INLINE void gt_mm_skip_align_64(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor = GT_MM_CAST_PTR(
      (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_64b) & (~GT_MM_MEM_ALIGNED_MASK_64b));
  GT_MM_CHECK_SEGMENT(mm);
  GT_MM_CHECK_ALIGNMENT(mm,64b);
}
// Rounds the cursor up to the next 128-byte boundary
GT_INLINE void gt_mm_skip_align_128(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor = GT_MM_CAST_PTR(
      (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_128b) & (~GT_MM_MEM_ALIGNED_MASK_128b));
  GT_MM_CHECK_SEGMENT(mm);
  GT_MM_CHECK_ALIGNMENT(mm,128b);
}
// Rounds the cursor up to the next 512-byte boundary
GT_INLINE void gt_mm_skip_align_512(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor = GT_MM_CAST_PTR(
      (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_512b) & (~GT_MM_MEM_ALIGNED_MASK_512b));
  GT_MM_CHECK_SEGMENT(mm);
  GT_MM_CHECK_ALIGNMENT(mm,512b);
}
// Rounds the cursor up to the next 1KB boundary
GT_INLINE void gt_mm_skip_align_1024(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor = GT_MM_CAST_PTR(
      (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_1KB) & (~GT_MM_MEM_ALIGNED_MASK_1KB));
  GT_MM_CHECK_SEGMENT(mm);
  GT_MM_CHECK_ALIGNMENT(mm,1KB);
}
// Rounds the cursor up to the next 4KB boundary
GT_INLINE void gt_mm_skip_align_4KB(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  mm->cursor = GT_MM_CAST_PTR(
      (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_4KB) & (~GT_MM_MEM_ALIGNED_MASK_4KB));
  GT_MM_CHECK_SEGMENT(mm);
  GT_MM_CHECK_ALIGNMENT(mm,4KB);
}
GT_INLINE void gt_mm_skip_align_mempage(gt_mm* const mm) {
  // Aligns the cursor to the system's virtual-memory page boundary.
  GT_MM_CHECK(mm);
  const uint64_t page_size = (uint64_t)sysconf(_SC_PAGESIZE);
  gt_mm_skip_align(mm,page_size);
}
/*
* Read functions
*/
// Reads a native-endian uint64 at the cursor and advances 8 bytes
// NOTE(review): these reads assume the cursor is suitably aligned for the type —
// confirm callers use the gt_mm_skip_align* helpers on strict-alignment targets.
GT_INLINE uint64_t gt_mm_read_uint64(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  const uint64_t data = *((uint64_t*)mm->cursor);
  mm->cursor += 8;
  return data;
}
// Reads a native-endian uint32 at the cursor and advances 4 bytes
GT_INLINE uint32_t gt_mm_read_uint32(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  const uint32_t data = *((uint32_t*)mm->cursor);
  mm->cursor += 4;
  return data;
}
// Reads a native-endian uint16 at the cursor and advances 2 bytes
GT_INLINE uint16_t gt_mm_read_uint16(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  const uint16_t data = *((uint16_t*)mm->cursor);
  mm->cursor += 2;
  return data;
}
// Reads a uint8 at the cursor and advances 1 byte
GT_INLINE uint8_t gt_mm_read_uint8(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  const uint8_t data = *((uint8_t*)mm->cursor);
  mm->cursor += 1;
  return data;
}
// Returns a pointer to the next num_bytes (zero-copy) and advances the cursor past them
GT_INLINE void* gt_mm_read_mem(gt_mm* const mm,const uint64_t num_bytes) {
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  void* const current_cursor = mm->cursor;
  mm->cursor += num_bytes;
  return current_cursor;
}
GT_INLINE void gt_mm_copy_mem(gt_mm* const mm,void* const dst,const uint64_t num_bytes) {
  // Copies num_bytes from the current cursor position into dst, then advances the cursor.
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  void* const src = mm->cursor;
  mm->cursor += num_bytes;
  memcpy(dst,src,num_bytes);
}
GT_INLINE void gt_mm_copy_mem_parallel(
    gt_mm* const mm,void* const dst,const uint64_t num_bytes,const uint64_t num_threads) {
  // Copies num_bytes from the cursor into dst in num_threads chunks, then advances
  // the cursor. Assumes num_bytes > num_threads (as the original did).
  // FIX: the OpenMP '#pragma omp parallel' was commented out, so the structured block
  // executed exactly once with tid==0 and, for num_threads>1, only the first
  // num_bytes/num_threads bytes were copied while the cursor still advanced the full
  // num_bytes. All chunks are now copied (serially; re-parallelize once the omp
  // region is validated).
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  // Calculate size of each chunk (the last chunk absorbs the remainder)
  const uint64_t chunk_size = num_bytes/num_threads;
  uint64_t tid;
  for (tid=0;tid<num_threads;++tid) {
    const uint64_t offset = tid*chunk_size;
    const uint64_t size = (tid < (num_threads-1)) ? chunk_size : num_bytes-chunk_size*tid;
    memcpy(dst+offset,mm->cursor+offset,size);
  }
  mm->cursor += num_bytes;
}
/*
* Write functions
*/
// Writes a native-endian uint64 at the cursor and advances 8 bytes
GT_INLINE void gt_mm_write_uint64(gt_mm* const mm,const uint64_t data) {
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  *((uint64_t*)mm->cursor) = data;
  mm->cursor += 8;
}
// Writes a native-endian uint32 at the cursor and advances 4 bytes
GT_INLINE void gt_mm_write_uint32(gt_mm* const mm,const uint32_t data) {
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  *((uint32_t*)mm->cursor) = data;
  mm->cursor += 4;
}
// Writes a native-endian uint16 at the cursor and advances 2 bytes
GT_INLINE void gt_mm_write_uint16(gt_mm* const mm,const uint16_t data) {
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  *((uint16_t*)mm->cursor) = data;
  mm->cursor += 2;
}
// Writes a uint8 at the cursor and advances 1 byte
GT_INLINE void gt_mm_write_uint8(gt_mm* const mm,const uint8_t data) {
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  *((uint8_t*)mm->cursor) = data;
  mm->cursor += 1;
}
GT_INLINE void gt_mm_write_mem(gt_mm* const mm,void* const src,const uint64_t num_bytes) {
  // Writes num_bytes from src at the current cursor position and advances the cursor
  // (mirror of gt_mm_copy_mem). Previously an empty TODO stub that silently dropped
  // all writes.
  GT_MM_CHECK(mm);
  GT_MM_CHECK_SEGMENT(mm);
  memcpy(mm->cursor,src,num_bytes);
  mm->cursor += num_bytes;
}
/*
* SlabMemory
* Relative big amounts of objects allocated all at once (like the LINUX slab allocator)
* Objects of a certain type are ready to go inside the slab, thus reducing
* the overhead of malloc/setup/free cycles along the program
*/
//GT_INLINE void* gt_mm_slab_mmalloc(gt_mm_slab* const slab,const uint64_t num_elements); // TODO
//GT_INLINE void gt_mm_slab_mfree(gt_mm_slab* const slab,void* mem_addr,const uint64_t num_elements); // TODO
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const PixelPacket
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Start from an "empty" box (x/y at the far corner, width/height zero); the
    row scan below moves these toward the true content bounds.
  */
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  /*
    Sample three border pixels as background references: top-left (target[0]),
    top-right (target[1]) and bottom-left (target[2]).
  */
  GetMagickPixelPacket(image,&target[0]);
  image_view=AcquireCacheView(image);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const PixelPacket *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[0]);
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[1]);
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[2]);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    RectangleInfo
      bounding_box;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;  /* per-row snapshot of the shared bounds */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /* width/height temporarily track max x/y indexes of non-background pixels */
      if ((x < bounding_box.x) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      /* merge this row's box into the shared result */
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* convert max x/y indexes into extents relative to the box origin */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  /* Convenience wrapper: depth across all channels. */
  return(GetImageChannelDepth(image,AllChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    id;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth: the minimum number of bits needed to represent every
    selected channel sample without loss. Each OpenMP thread tracks its own
    running depth in current_depth[] and the maximum is taken at the end.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=GetOpenMPMaximumThreads();
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (id=0; id < (ssize_t) number_threads; id++)
    current_depth[id]=1;
  if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        register const PixelPacket
          *restrict p;

        if (status == MagickFalse)
          continue;
        /*
          FIX: index the colormap by the loop counter. The previous code advanced
          a pointer shared by all threads with p++ inside the parallel region — a
          data race that also decoupled the pointer from the iteration index.
        */
        p=image->colormap+i;
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickStatusType
            status;

          QuantumAny
            range;

          /* a non-zero status means some sample loses precision at this depth */
          status=0;
          range=GetQuantumRange(current_depth[id]);
          if ((channel & RedChannel) != 0)
            status|=p->red != ScaleAnyToQuantum(ScaleQuantumToAny(p->red,
              range),range);
          if ((channel & GreenChannel) != 0)
            status|=p->green != ScaleAnyToQuantum(ScaleQuantumToAny(p->green,
              range),range);
          if ((channel & BlueChannel) != 0)
            status|=p->blue != ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,
              range),range);
          if (status == 0)
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (id=1; id < (ssize_t) number_threads; id++)
        if (depth < current_depth[id])
          depth=current_depth[id];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
      {
        MagickStatusType
          status;

        QuantumAny
          range;

        status=0;
        range=GetQuantumRange(current_depth[id]);
        if ((channel & RedChannel) != 0)
          status|=p->red != ScaleAnyToQuantum(ScaleQuantumToAny(p->red,range),
            range);
        if ((channel & GreenChannel) != 0)
          status|=p->green != ScaleAnyToQuantum(ScaleQuantumToAny(p->green,
            range),range);
        if ((channel & BlueChannel) != 0)
          status|=p->blue != ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,range),
            range);
        if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
          status|=p->opacity != ScaleAnyToQuantum(ScaleQuantumToAny(p->opacity,
            range),range);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          status|=indexes[x] != ScaleAnyToQuantum(ScaleQuantumToAny(indexes[x],
            range),range);
        if (status == 0)
          break;
        current_depth[id]++;
      }
      p++;
    }
    /* this thread already hit the maximum depth: no other row can raise it */
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (id=1; id < (ssize_t) number_threads; id++)
    if (depth < current_depth[id])
      depth=current_depth[id];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
static inline double MagickMin(const double x,const double y)
{
  /*
    Returns the smaller of x and y.
  */
  return(x < y ? x : y);
}
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  /*
    Round the image depth up to the smallest legal quantum depth (8, 16, 32, or
    64); depths above 64 pass through unchanged, as before. When constrain is
    set, the result is additionally capped at MAGICKCORE_QUANTUM_DEPTH.
  */
  static const size_t
    legal_depths[4] = { 8, 16, 32, 64 };

  register ssize_t
    i;

  size_t
    depth;

  depth=image->depth;
  for (i=0; i < 4; i++)
    if (depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double)
      MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  /*
    Classify the image's potential type, testing from the most restrictive
    class (bilevel) to the least (true color). The Is*Image() probes are kept
    in the original order since they cache results on the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? GrayscaleMatteType : GrayscaleType);
  if (IsPaletteImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? PaletteMatteType : PaletteType);
  return(image->matte != MagickFalse ? TrueColorMatteType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Trust a previously cached gray/bilevel classification */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  if (image->colorspace == CMYKColorspace)
    return(MagickFalse);
  /* Assume bilevel until a gray (non-monochrome) or colored pixel appears */
  type=BilevelType;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  /* Cache the discovered type on the (logically const) image */
  ((Image *) image)->type=type;
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    ((Image *) image)->type=GrayscaleMatteType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Returns MagickTrue when every pixel is pure black or pure white (identical
    RGB intensities of 0 or QuantumRange); on success the result is cached in
    image->type.
  */
  CacheView
    *image_view;

  MagickBooleanType
    monochrome;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if (image->colorspace == CMYKColorspace)
    return(MagickFalse);
  monochrome=MagickTrue;
  image_view=AcquireCacheView(image);
  for (y=0; (monochrome != MagickFalse) && (y < (ssize_t) image->rows); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          monochrome=MagickFalse;
          break;
        }
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (monochrome == MagickFalse)
    return(MagickFalse);
  ((Image *) image)->type=BilevelType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Determine if the image is fully opaque: no pixel carries an opacity value
    other than OpaqueOpacity. A pixel-cache read failure is treated as
    non-opaque, matching the previous behavior.
  */
  CacheView
    *image_view;

  MagickBooleanType
    opaque;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);  /* no alpha channel at all */
  opaque=MagickTrue;
  image_view=AcquireCacheView(image);
  for (y=0; (opaque != MagickFalse) && (y < (ssize_t) image->rows); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        opaque=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (p->opacity != OpaqueOpacity)
        {
          opaque=MagickFalse;
          break;
        }
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  /* Convenience wrapper: set the depth of all channels at once. */
  return(SetImageChannelDepth(image,AllChannels,depth));
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /*
    If the image already fits within the requested depth (clamped to the
    build-time quantum depth), just record the new depth.
  */
  if (GetImageDepth(image,&image->exception) <= (size_t)
      MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH))
    {
      image->depth=depth;
      return(MagickTrue);
    }
  /*
    Scale pixels to desired depth: round-trip each sample through the
    reduced range so low-order bits are quantized away.
  */
  status=MagickTrue;
  range=GetQuantumRange(depth);
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=ScaleAnyToQuantum(ScaleQuantumToAny(q->red,range),range);
      if ((channel & GreenChannel) != 0)
        q->green=ScaleAnyToQuantum(ScaleQuantumToAny(q->green,range),range);
      if ((channel & BlueChannel) != 0)
        q->blue=ScaleAnyToQuantum(ScaleQuantumToAny(q->blue,range),range);
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        q->opacity=ScaleAnyToQuantum(ScaleQuantumToAny(q->opacity,range),
          range);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=ScaleAnyToQuantum(ScaleQuantumToAny(indexes[x],range),
          range);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        /*
          FIX: index the colormap by i instead of advancing a pointer that
          was shared across iterations (p++) -- a data race that scrambled
          colormap entries when the loop ran under OpenMP.
        */
        register PixelPacket
          *restrict p;

        p=image->colormap+i;
        if ((channel & RedChannel) != 0)
          p->red=ScaleAnyToQuantum(ScaleQuantumToAny(p->red,range),range);
        if ((channel & GreenChannel) != 0)
          p->green=ScaleAnyToQuantum(ScaleQuantumToAny(p->green,range),range);
        if ((channel & BlueChannel) != 0)
          p->blue=ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,range),range);
        if ((channel & OpacityChannel) != 0)
          p->opacity=ScaleAnyToQuantum(ScaleQuantumToAny(p->opacity,range),
            range);
      }
    }
  image->depth=depth;
  return(status);
}
|
2068.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  /* Deterministic fill: A[row][col] = (row + col) / nj. */
  for (row = 0; row < ni; row++)
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int r, c;

  /* Dump B to stderr, one newline every 20 values (linearized index). */
  for (r = 0; r < ni; r++)
    for (c = 0; c < nj; c++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
      if ((r * NJ + c) % 20 == 0)
        fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
  /*
    3x3 stencil over the interior of A, writing B.  Every (i,j) iteration
    is independent, so both loops are collapsed and offloaded with one
    combined construct.  FIX: the previous code had a bare `#pragma omp`
    (not a valid OpenMP directive) on the outer loop and a `schedule`
    clause on `target teams distribute`, which only accepts
    `dist_schedule` -- both reject under a conforming compiler.
  */
#pragma omp target teams distribute parallel for collapse(2)
  for (i = 1; i < _PB_NI - 1; ++i)
    for (j = 1; j < _PB_NJ - 1; ++j)
      {
        B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
      }
#pragma endscop
  // printf("Kernel computation complete !!\n");
}
/*
  Driver: allocate and initialize A, time kernel_conv2d(), and hand B to
  polybench_prevent_dce() so the computation cannot be optimized away.
*/
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
eint.h | /*
* Copyright 2017 Benjamin Santos <caos21@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef EINT_H
#define EINT_H
#define FINITE 1
#include <iostream>
#include <fstream>
#include <cmath>
#include <string>
#include <vector>
#include <utility>
#include <algorithm>
#include <omp.h>
#include <boost/array.hpp>
#include <boost/math/tools/roots.hpp>
// #include <boost/numeric/odeint.hpp>
//#include <boost/math/special_functions/factorials.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/symmetric.hpp>
#include <boost/numeric/ublas/vector_proxy.hpp>
#include <boost/numeric/ublas/io.hpp>
#include <boost/numeric/ublas/operation.hpp>
#include <boost/numeric/bindings/traits/ublas_matrix.hpp>
#include <boost/numeric/bindings/traits/ublas_sparse.hpp>
#include <boost/numeric/bindings/lapack/gesv.hpp>
#include <boost/numeric/bindings/lapack/sysv.hpp>
#include <boost/numeric/bindings/traits/ublas_vector2.hpp>
// #include <boost/numeric/bindings/umfpack/umfpack.hpp>
#include "array.h"
#include "constants.h"
namespace tools = boost::math::tools;
namespace ublas = boost::numeric::ublas;
namespace lapack = boost::numeric::bindings::lapack;
// namespace umf = boost::numeric::bindings::umfpack;
namespace bmath = boost::math;
#include <boost/multiprecision/cpp_int.hpp>
using boost::multiprecision::cpp_int;
#include <boost/multiprecision/cpp_dec_float.hpp>
using boost::multiprecision::cpp_dec_float_50;
using boost::multiprecision::cpp_dec_float_100;
typedef boost::multiprecision::number<boost::multiprecision::cpp_dec_float<50>> mpfloat;
//typedef double mpfloat;
//typedef long double mpfloat;
//! Bracket-narrowing termination test for boost::math::tools root finders:
//! stop once the bracket [min,max] is relatively tight (0.01%).
struct TerminationCondition {
  bool operator() (double min, double max) {
    // FIX: use std::fabs -- the unqualified `abs` could resolve to the C
    // `int abs(int)` overload, truncating every sub-unity relative error
    // to zero and terminating the search immediately.
    return std::fabs((min - max)/min) <= 0.0001;
  }
};
namespace eint {
typedef ublas::matrix<double> ubmatrix;
typedef ublas::vector<double> ubvector;
//! Factorized (dimensionless) van der Waals non-retarded potential
/*!
\param rstar the separation r/(r1+r2).
\param omega the ratio r1/(r1+r2).
\param HA the Hamaker constant.
*/
inline
double potential_vdw_fact(double rstar, double omega, double HA=1.0) {
  const double r2 = rstar*rstar;
  const double w = 2.0*omega*(1.0-omega);
  const double g2 = pow(2.0*omega-1.0, 2);
  // Hamaker two-sphere expression in reduced coordinates; the denominators
  // vanish at contact (r2 -> 1), which callers avoid via a cutoff shift.
  return (-HA/6.0)*(w/(r2-1.0) + w/(r2-g2) + log((r2-1.0)/(r2-g2)));
}
//! van der Waals potential
/*!
\param rto the separation.
\param r1o radius of particle 1.
\param r2o radius of particle 2.
\param HA the Hamaker constant.
\param rSi van der Waals radius of silicon or cutoff.
*/
inline
double potential_vdw(double rto, double r1o, double r2o, double rSi=0.21e-9,
                     double HA=20e-20){
  const double radsum = r1o+r2o;
  const double omega = r1o/radsum;
  const double rstar = rto/radsum;
  const double cutoff = rSi/radsum;
  // Shift the separation by the cutoff and damp with tanh so the contact
  // singularity of the factorized potential is never evaluated.
  return potential_vdw_fact(rstar+cutoff, omega)*tanh((rstar)/(cutoff))*HA;
}
//! van der Waals potential non dimensional
/*!
\param rt the separation rt/r1.
\param r21 ratio r2/r1.
*/
inline
double potential_vdw_nodim(double rt, double r21) {
  const double rt2 = rt*rt;
  const double dplus = rt2 - pow(r21 + 1., 2);   // vanishes at contact
  const double dminus = rt2 - pow(-r21 + 1., 2);
  return -2.*r21/dplus - 2.*r21/dminus + log(dminus/dplus);
}
//! van der Waals force non dimensional
/*!
\param rt the separation rt/r1.
\param r21 ratio r2/r1.
*/
inline
double force_vdw_nodim(double rt, double r21) {
  const double rt2 = rt*rt;
  const double dplus = rt2 - pow(r21 + 1., 2);
  const double dminus = rt2 - pow(-r21 + 1., 2);
  // Radial derivative of potential_vdw_nodim with respect to rt.
  return -4.*rt*r21/pow(dplus, 2)
         - 4.*rt*r21/pow(dminus, 2)
         + 2.*rt/dplus
         - 2.*rt/dminus;
}
//! Factorized (dimensionless) Coulomb potential
/*!
\param rt the separation.
\param r21 the ratio r2/r1.
\param q21 the ratio q2/q1.
\param eps the dielectric constant.
*/
inline
double potential_coulomb_fact(const double rt, const double r21,
const double q21) {
// Reduced Coulomb energy Kcoul*(q2/q1)/(r/r1); `r21` is unused here but
// kept so the signature parallels the other *_fact potentials.
return Kcoul*q21/rt;
}
//! Factorized (dimensionless) Coulomb force
/*!
\param rt the separation.
\param r21 the ratio r2/r1.
\param q21 the ratio q2/q1.
\param eps the dielectric constant.
*/
inline
double force_coulomb_fact(const double rt, const double r21,
const double q21) {
// Reduced Coulomb force Kcoul*(q2/q1)/rt^2; `r21` unused, kept for
// signature parity with the other *_fact forces.
return Kcoul*q21/(rt*rt);
}
//! Factorized (dimensionless) potential approximation IPA.
/*!
\param rt the separation.
\param r21 the ratio r2/r1.
\param q21 the ratio q2/q1.
\param eps the dielectric constant.
*/
inline
double potential_ipa_fact(const double rt, const double r21,
const double q21, const double eps) {
  // Clausius-Mossotti factor of the dielectric.
  const double cm = (eps-1.0)/(eps+2.0);
  const double indA = cm*Kcoul*pow(r21, 3);
  const double indB = cm*Kcoul*q21*q21;
  // Bare Coulomb term minus the two image/polarization corrections.
  return Kcoul*q21/rt
         - indA/(2*rt*rt*(rt*rt - r21*r21))
         - indB/(2*rt*rt*(rt*rt - 1.0));
}
//! Factorized (dimensionless) force approximation IPA.
/*!
\param rt the separation.
\param r21 the ratio r2/r1.
\param q21 the ratio q2/q1.
\param eps the dielectric constant.
*/
inline
double force_ipa_fact(const double rt, const double r21,
const double q21, const double eps) {
  // Clausius-Mossotti factor of the dielectric.
  const double cm = (eps-1.0)/(eps+2.0);
  const double numA = cm*Kcoul*pow(r21, 3) * (2.*rt*rt-r21*r21);
  const double numB = cm*Kcoul*q21*q21*(2.*rt*rt-1.);
  // Radial derivative of potential_ipa_fact.
  return Kcoul*q21/(rt*rt)
         - numA/(pow(rt, 3)*pow((rt*rt - r21*r21),2))
         - numB/(pow(rt, 3)*pow(rt*rt - 1.0, 2));
}
// Coulomb vdW
//! coulomb + van der Waals potential
/*!
\param r the separation.
\param r1 radius of particle 1.
\param q1 charge of particle 1.
\param r2 radius of particle 2.
\param q2 charge of particle 2.
\param eps the dielectric constant.
\param HA the Hamaker constant.
\param rc van der Waals radius of silicon or cutoff.
\param coulomb switch / multiplier for coulomb
\param vdw switch / multiplier for vdw
*/
// Combined Coulomb + van der Waals pair potential.  Works in units reduced
// by r1 after normalizing so that r21 = r2/r1 <= 1.  NOTE: `eps` is
// accepted but unused in this function; it is kept so the signature
// parallels potential_ipavdw().
inline
double potential_coulombvdw(double r,
double r1,
double q1,
double r2,
double q2,
double eps,
double AH,
double rc,
double coulomb=1.0,
double vdw=1.0) {
// swap particles. We want to keep r21 < 1
if (r2 > r1) {
std::swap(r1, r2);
std::swap(q1, q2);
}
double q21 = 0.0;
// for neutrals only vdW holds
// WARNING float comparison
if ((q1 == 0.0) && (q2 == 0.0)) {
coulomb = 0.0;
q21 = 0.0;
}
else {
if (q1 == 0.0) {// p1 is neutral
// swap back so the charged particle defines the reduction; with
// q21 = 0 the Coulomb term below evaluates to zero anyway
std::swap(r1, r2);
std::swap(q1, q2);
q21 = 0.0;
}
else {// q1 != 0 and q2
q21 = q2/q1;
}
}
double r21 = r2 / r1;
double rt = r/r1;
double rs = 0.0;
// rs: reduced separation for the vdW term, clamped at contact + cutoff
// (r1+r2+rc) so the vdW expression is never evaluated at its contact
// singularity
if (r > r1+r2+rc){
rs = r/r1;
}
else {
rs = (r1+r2+rc)/r1;
}
// dimensional prefactors: q1^2/r1 for Coulomb, AH/6 for vdW
double coulombfactor = coulomb * q1*q1 / r1;
double vdwfactor = vdw * AH / 6.0;
double coulombpot = 0.0;
double vdwpot = 0.0;
if (coulomb > 0.0) {// only coulomb
coulombpot = coulombfactor * potential_coulomb_fact(rt, r21, q21);
}
if (vdw > 0.0) {// only vdw
vdwpot = vdwfactor * potential_vdw_nodim(rs, r21);
}
return coulombpot + vdwpot;
}
//! coulomb + van der Waals force
/*!
\param r the separation.
\param r1 radius of particle 1.
\param q1 charge of particle 1.
\param r2 radius of particle 2.
\param q2 charge of particle 2.
\param eps the dielectric constant.
\param HA the Hamaker constant.
\param rc van der Waals radius of silicon or cutoff.
\param coulomb switch / multiplier for coulomb
\param vdw switch / multiplier for vdw
*/
// Combined Coulomb + van der Waals pair force.  Mirrors
// potential_coulombvdw(): reduced units of r1 with r21 = r2/r1 <= 1.
// NOTE: `eps` is accepted but unused here (signature parity with
// force_ipavdw()).
inline
double force_coulombvdw(double r,
double r1,
double q1,
double r2,
double q2,
double eps,
double AH,
double rc,
double coulomb=1.0,
double vdw=1.0) {
// swap particles. We want to keep r21 < 1
if (r2 > r1) {
std::swap(r1, r2);
std::swap(q1, q2);
}
double q21 = 0.0;
// for neutrals only vdW holds
// WARNING float comparison
if ((q1 == 0.0) && (q2 == 0.0)) {
coulomb = 0.0;
q21 = 0.0;
}
else {
if (q1 == 0.0) {// p1 is neutral
// swap back so the charged particle defines the reduction; q21 = 0
// makes the Coulomb contribution vanish below
std::swap(r1, r2);
std::swap(q1, q2);
q21 = 0.0;
}
else {// q1 != 0 and q2
q21 = q2/q1;
}
}
double r21 = r2 / r1;
double rt = r/r1;
double rs = 0.0;
// clamp the vdW separation at contact + cutoff (see potential_coulombvdw)
if (r > r1+r2+rc){
rs = r/r1;
}
else {
rs = (r1+r2+rc)/r1;
}
// force prefactors: q1^2/r1^2 for Coulomb, AH/(6*r1) for vdW
double coulombfactor = coulomb * q1*q1 / (r1*r1);
double vdwfactor = vdw * AH / (6.0*r1);
double coulombforce = 0.0;
double vdwforce = 0.0;
if (coulomb > 0.0) {// only coulomb
coulombforce = coulombfactor * force_coulomb_fact(rt, r21, q21);
}
if (vdw > 0.0) {// only vdw
vdwforce = vdwfactor * force_vdw_nodim(rs, r21);
}
return coulombforce + vdwforce;
}
// IPA vdW
//! IPA + van der Waals potential
/*!
\param r the separation.
\param r1 radius of particle 1.
\param q1 charge of particle 1.
\param r2 radius of particle 2.
\param q2 charge of particle 2.
\param eps the dielectric constant.
\param HA the Hamaker constant.
\param rc van der Waals radius of silicon or cutoff.
\param ipa switch / multiplier for ipa
\param vdw switch / multiplier for vdw
*/
// Image-potential approximation (IPA) + van der Waals pair potential.
// Same reduction scheme as potential_coulombvdw(); here `eps` IS used,
// forwarded to potential_ipa_fact().
inline
double potential_ipavdw(double r,
double r1,
double q1,
double r2,
double q2,
double eps,
double AH,
double rc,
double ipa=1.0,
double vdw=1.0) {
// swap particles. We want to keep r21 < 1
if (r2 > r1) {
std::swap(r1, r2);
std::swap(q1, q2);
}
double q21 = 0.0;
// for neutrals only vdW holds
// WARNING float comparison
if ((q1 == 0.0) && (q2 == 0.0)) {
ipa = 0.0;
q21 = 0.0;
}
else {
if (q1 == 0.0) {// p1 is neutral
// swap back so the charged particle defines the reduction
std::swap(r1, r2);
std::swap(q1, q2);
q21 = 0.0;
}
else {// q1 != 0 and q2
q21 = q2/q1;
}
}
double r21 = r2 / r1;
double rt = r/r1;
double rs = 0.0;
// clamp the vdW separation at contact + cutoff to avoid the contact
// singularity of potential_vdw_nodim
if (r > r1+r2+rc){
rs = r/r1;
}
else {
rs = (r1+r2+rc)/r1;
}
// dimensional prefactors: q1^2/r1 for IPA, AH/6 for vdW
double ipafactor = ipa * q1*q1 / r1;
double vdwfactor = vdw * AH / 6.0;
double ipapot = 0.0;
double vdwpot = 0.0;
if (ipa > 0.0) {// only ipa
ipapot = ipafactor * potential_ipa_fact(rt, r21, q21, eps);
}
if (vdw > 0.0) {// only vdw
vdwpot = vdwfactor * potential_vdw_nodim(rs, r21);
}
return ipapot + vdwpot;
}
//! IPA + van der Waals force
/*!
\param r the separation.
\param r1 radius of particle 1.
\param q1 charge of particle 1.
\param r2 radius of particle 2.
\param q2 charge of particle 1.
\param eps the dielectric constant.
\param HA the Hamaker constant.
\param rc van der Waals radius of silicon or cutoff.
\param ipa switch / multiplier for ipa
\param vdw switch / multiplier for vdw
*/
// Image-potential approximation (IPA) + van der Waals pair force.
// Mirrors potential_ipavdw(); `eps` is forwarded to force_ipa_fact().
inline
double force_ipavdw(double r,
double r1,
double q1,
double r2,
double q2,
double eps,
double AH,
double rc,
double ipa=1.0,
double vdw=1.0) {
// swap particles. We want to keep r21 < 1
if (r2 > r1) {
std::swap(r1, r2);
std::swap(q1, q2);
}
double q21 = 0.0;
// for neutrals only vdW holds
// WARNING float comparison
if ((q1 == 0.0) && (q2 == 0.0)) {
ipa = 0.0;
q21 = 0.0;
}
else {
if (q1 == 0.0) {// p1 is neutral
// swap back so the charged particle defines the reduction
std::swap(r1, r2);
std::swap(q1, q2);
q21 = 0.0;
}
else {// q1 != 0 and q2
q21 = q2/q1;
}
}
double r21 = r2 / r1;
double rt = r/r1;
double rs = 0.0;
// clamp the vdW separation at contact + cutoff (see potential_ipavdw)
if (r > r1+r2+rc){
rs = r/r1;
}
else {
rs = (r1+r2+rc)/r1;
}
// force prefactors: q1^2/r1^2 for IPA, AH/(6*r1) for vdW
double ipafactor = ipa * q1*q1 / (r1*r1);
double vdwfactor = vdw * AH / (6.0*r1);
double ipaforce = 0.0;
double vdwforce = 0.0;
if (ipa > 0.0) {// only ipa
ipaforce = ipafactor * force_ipa_fact(rt, r21, q21, eps);
}
if (vdw > 0.0) {// only vdw
vdwforce = vdwfactor * force_vdw_nodim(rs, r21);
}
return ipaforce + vdwforce;
}
/************/
//! Series potential approximation.
/*!
* Potential energy for a point charge q and dielectric sphere radius a
* at a distance s, Stratton pag. 204
* Here we add the interaction of the other particle. Superposed.
* SIPA
\param r1 radius of particle 1.
\param q1 charge of particle 1.
\param r2 radius of particle 2.
\param q2 charge of particle 2.
\param rt the separation.
\param eps the dielectric constant.
\param terms the number of terms in the expansion.
*/
//! SIPA series potential: superposed point-charge/dielectric-sphere
//! interactions (Stratton p. 204) plus the bare Coulomb energy.
//! \param r1,q1 radius and charge of particle 1.
//! \param r2,q2 radius and charge of particle 2.
//! \param rt the separation.
//! \param eps the dielectric constant.
//! \param terms the number of series terms.
inline
double potential_series(const double r1, const double q1,
const double r2, const double q2, const double rt,
const double eps, unsigned int terms=50) {
  // Induced term for charge q1 next to dielectric sphere r2 (q2 = 0).
  // The sums start at j = 1: the j = 0 term carries a factor j and is
  // identically zero (this also fixes the odd `unsigned int j = 0.0`
  // initializers of the previous version).
  double prefactor1 = Kcoul * q1*q1 * (eps-1.0) / (2.*r2);
  double pot1 = 0.0;
  for (unsigned int j=1; j<terms; ++j) {
    double numer = j*pow(r2/rt, 2*j+2);
    double denom = (j*eps+j+1.0);
    pot1 += numer/denom;
  }
  pot1 *= prefactor1;
  // Induced term for charge q2 next to dielectric sphere r1 (q1 = 0).
  double prefactor2 = Kcoul * q2*q2 * (eps-1.0) / (2.*r1);
  double pot2 = 0.0;
  for (unsigned int j=1; j<terms; ++j) {
    double numer = j*pow(r1/rt, 2*j+2);
    double denom = (j*eps+j+1.0);
    pot2 += numer/denom;
  }
  pot2 *= prefactor2;
  // Bare Coulomb potential energy; induced terms are attractive.
  double pot_c = Kcoul*q1*q2/rt;
  return pot_c - pot1 - pot2;
}
//! Series force approximation.
/*!
* Force for a point charge q and dielectric sphere radius a
* at a distance s, Stratton pag. 204
* Here we add the interaction of the other particle. Superposed.
* SIPA
\param r1 radius of particle 1.
\param q1 charge of particle 1.
\param r2 radius of particle 2.
\param q2 charge of particle 2.
\param rt the separation.
\param eps the dielectric constant.
\param terms the number of terms in the expansion.
*/
//! SIPA series force: superposed point-charge/dielectric-sphere forces
//! (Stratton p. 204) plus the bare Coulomb force.
//! \param r1,q1 radius and charge of particle 1.
//! \param r2,q2 radius and charge of particle 2.
//! \param rt the separation.
//! \param eps the dielectric constant.
//! \param terms the number of series terms.
inline
double force_series(const double r1, const double q1,
const double r2, const double q2, const double rt,
const double eps, unsigned int terms=50) {
  // Induced term for charge q1 next to dielectric sphere r2.  Sums start
  // at j = 1: the j = 0 term carries a factor j*(j+1) and is identically
  // zero (also fixes the odd `unsigned int j = 0.0` initializers).
  double prefactor1 = Kcoul * q1*q1;
  double force1 = 0.0;
  for (unsigned int j=1; j<terms; ++j) {
    double numer = pow(r2, 2*j+1)*(j*(j+1)*(eps-1.0));
    double denom = pow(rt, 2*j+3)*(j*eps+j+1.0);
    force1 += numer/denom;
  }
  force1 *= prefactor1;
  // Induced term for charge q2 next to dielectric sphere r1.
  double prefactor2 = Kcoul * q2*q2;
  double force2 = 0.0;
  for (unsigned int j=1; j<terms; ++j) {
    double numer = pow(r1, 2*j+1)*(j*(j+1)*(eps-1.0));
    double denom = pow(rt, 2*j+3)*(j*eps+j+1.0);
    force2 += numer/denom;
  }
  force2 *= prefactor2;
  // Bare Coulomb force; induced terms are attractive.
  double force_c = Kcoul*q1*q2/(rt*rt);
  return force_c - force1 - force2;
}
/************/
// unsigned int factorial(unsigned int n) {
// return (n == 1 || n == 0) ? 1 : factorial(n - 1) * n;
// }
//! Factorial returning double (exact for n <= 170 in IEEE double range).
//! FIX: the previous recursive version bounced between double and
//! unsigned int on every call and terminated via a floating-point `==`
//! comparison; this iterative form avoids both.
inline
double factorial(unsigned int uin) {
  double result = 1.0;
  for (unsigned int k = 2; k <= uin; ++k)
    result *= static_cast<double>(k);
  return result;
}
template <typename T>
T from_stirling(double x){
// helper, derived from Stirling's approximation
// Computes x*ln(x) + (1/2)*ln(2*pi*x).  The -x term of the full series
// ln(x!) ~ x*ln(x) - x + (1/2)*ln(2*pi*x) is omitted: it cancels
// identically in stirling_quotient(), the only caller in this header.
// T (e.g. a multiprecision float) selects the working precision; `pi`
// comes from constants.h.
T y = x;
T ret = y*log(y)+0.5*log(2.*pi*y);
return ret;
}
template <typename T>
T stirling_quotient(double x, double y){
// Approximates ln((x+y)! / (x! * y!)) via from_stirling(); the -x terms
// dropped there cancel exactly here: -(x+y) + x + y = 0.
T ret = from_stirling<T>(x+y) - from_stirling<T>(x) - from_stirling<T>(y);
return ret;
}
// compute D matrix elements
// D-matrix element: binomial(l+m, m) * (r1/rt)^l * (r2/rt)^m, the
// geometric coupling between multipole orders l and m.
inline
double dijlm(double rt, double r1, double r2, unsigned int l, unsigned int m) {
if (m==0) {
// binomial(l, 0) = 1, so only the (r1/rt)^l factor survives
return pow(r1/rt, l);
}
unsigned int lm = l+m;
if (lm <= NFMAX-1){
// use exact factorials
// Facts is the factorial table from constants.h; valid up to NFMAX-1.
return (Facts[lm]/(Facts[l]*Facts[m]))*pow(r1/rt, l)*pow(r2/rt, m);
}
else {
// Stirling's approximation
// Evaluated in log space to avoid overflowing the huge factorials.
double lpart = l*(log((lm)*r1/(l*rt)));
double mpart = m*(log((lm)*r2/(m*rt)));
double apart = 0.5*log(lm/(2.*pi*l*m));
// first correction terms of the Stirling series, 1/(12n) - 1/(360n^3)
// NOTE(review): lm*lm*lm is unsigned int arithmetic -- overflows for
// lm > ~1625; fine for the expansion orders used here, verify if
// nterms ever grows large.
double hoterms = 1./(12*lm) - 1./(360*lm*lm*lm);
return exp(lpart+mpart+apart+hoterms);
}
}
// compute C Matrix
inline
double clm(double rt, double r21, unsigned int l, unsigned int m, double eps) {
if (l==m) {
double cval = (rt/r21)*(((eps+1)*l+1)/((eps-1)*l));
return cval;
}
return 0.0;
}
// compute multipolar coefficients
// Solve the coupled linear system for the multipolar expansion
// coefficients (A1, A2) of two dielectric spheres at reduced separation
// rt.  The 0th-order coefficients A10/A20 are fixed analytically; orders
// 1..nterms-1 come from a block system  [C1 D1; D2 C2] [A1; A2] = [b1; b2]
// built from clm() (diagonal C) and dijlm() (dense D).  Build options:
// USING_EIGEN selects Eigen over ublas/lapack; USING_AMPC solves the full
// 2*ssize system instead of the Schur-reduced one.
inline
void compute_MPCoefficients(ubvector &A1coefficients,
ubvector &A2coefficients,
double &A10,
double &A20,
const double rt,
const double r21,
const double q21,
const double eps,
const unsigned int nterms) {
// monopole terms (known in closed form)
// NOTE(review): divides by r21 -- caller must guarantee r21 != 0.
A10 = Kcoul;
A20 = Kcoul*q21/r21;
// system is nterms - 0th term
unsigned int ssize = nterms-1;
#ifdef USING_EIGEN
VectorDD b1 = VectorDD::Zero(ssize);
VectorDD b2 = VectorDD::Zero(ssize);
MatrixDD C1 = MatrixDD::Zero(ssize, ssize);
MatrixDD C2 = MatrixDD::Zero(ssize, ssize);
MatrixDD C1_inv = MatrixDD::Zero(ssize, ssize);
MatrixDD C2_inv = MatrixDD::Zero(ssize, ssize);
MatrixDD D1 = MatrixDD::Zero(ssize, ssize);
MatrixDD D2 = MatrixDD::Zero(ssize, ssize);
#else
ubvector b1 = ublas::zero_vector<double>(ssize);
ubvector b2 = ublas::zero_vector<double>(ssize);
ubmatrix C1 = ublas::zero_matrix<double>(ssize, ssize);
ubmatrix C2 = ublas::zero_matrix<double>(ssize, ssize);
ubmatrix C1_inv = ublas::zero_matrix<double>(ssize, ssize);
ubmatrix C2_inv = ublas::zero_matrix<double>(ssize, ssize);
ubmatrix D1 = ublas::zero_matrix<double>(ssize, ssize);
ubmatrix D2 = ublas::zero_matrix<double>(ssize, ssize);
#endif
for (unsigned int l=0; l<ssize; ++l) {
// b vectors
// right-hand sides are driven by the opposite particle's monopole
b1(l) = -dijlm(rt, 1.0, r21, l+1, 0) * A20;
b2(l) = -dijlm(rt, r21, 1.0, l+1, 0) * A10;
//Fill C Matrices (diagonal)
C1(l, l) = clm(rt, r21, l+1, l+1, eps);
C2(l, l) = clm(rt, 1.0, l+1, l+1, eps);
//inverse C diagonal matrices
C1_inv(l, l) = 1.0/C1(l, l);
C2_inv(l, l) = 1.0/C2(l, l);
}
for (unsigned int l=0; l<ssize; ++l) {
for (unsigned int m=0; m<ssize; ++m) {
//Fill D Matrices
D1(l, m) = dijlm(rt, 1.0, r21, l+1, m+1);
D2(l, m) = dijlm(rt, r21, 1.0, l+1, m+1);
}
}
#ifdef USING_EIGEN
#ifdef USING_AMPC//using complete matrix A
// assemble and solve the full 2*ssize block system with Eigen
MatrixDD M = MatrixDD::Zero(2*ssize, 2*ssize);
M.block(0 , 0, ssize , ssize) = C1;
M.block(0 , ssize, ssize , ssize) = D1;
M.block(ssize, 0, ssize , ssize) = C2;
M.block(ssize, ssize, ssize , ssize) = D2;
VectorDD B = VectorDD::Zero(2*ssize);
B.segment(0 , ssize) = b1;
B.segment(ssize, ssize) = b2;
/* MatrixDD S = M.selfadjointView<Eigen::Upper>(); */
/* Eigen::ConjugateGradient<MatrixDD, Eigen::Upper> cg; */
/* cg.compute(S); */
/* VectorDD X(2*ssize); */
/* X = cg.solve(B); */
/* std::cerr << "[ii] #iterations: " << cg.iterations() << std::endl; */
/* std::cerr << "[ii] estimated error: " << cg.error() << std::endl; */
VectorDD X = VectorDD::Zero(2*ssize);
X = M.colPivHouseholderQr().solve(B);
for (unsigned int l=0; l<ssize; ++l) {
A1coefficients(l) = X(l);
A2coefficients(l) = X(l+ssize);
}
#else//USING_AMPC
// Schur-reduced system: eliminate A2 using the diagonal C2 inverse
MatrixDD M12 = MatrixDD::Zero(ssize, ssize);
VectorDD B1 = VectorDD::Zero(ssize);
M12 = C1 - D1 * (C2_inv * D2);
B1 = b1 - D1 * (C2_inv * b2);
VectorDD X1 = VectorDD::Zero(ssize);
//X1 = M12.colPivHouseholderQr().solve(B1);
X1 = M12.partialPivLu().solve(B1);
/* MatrixDD M21 = MatrixDD::Zero(ssize, ssize); */
/* VectorDD B2 = VectorDD::Zero(ssize); */
/* M21 = C2 - D2 * (C1_inv * D1); */
/* B2 = b2 - D2 * (C1_inv * b1); */
VectorDD X2 = VectorDD::Zero(ssize);
//X2 = M21.colPivHouseholderQr().solve(B2);
// X2 = C2inv * (b2 - D2 * X1)
X2 = C2_inv * (b2 - D2 * X1);
for (unsigned int l=0; l<ssize; ++l) {
A1coefficients(l) = X1(l);
A2coefficients(l) = X2(l);
}
#endif//USING_AMPC
#else//USING_EIGEN
#ifdef USING_AMPC//using complete matrix A
//
// ************ Slower, using full matrix ****************
//
ubmatrix M = ublas::zero_matrix<double>(2*ssize, 2*ssize);
ublas::range r0s = ublas::range(0, ssize);
ublas::range rs2s = ublas::range(ssize, 2*ssize);
project(M, r0s, r0s) = C1;
project(M, r0s, rs2s) = D1;
project(M, rs2s, r0s) = C2;
project(M, rs2s, rs2s) = D2;
ubvector B = ublas::zero_vector<double>(2*ssize);
project(B, r0s) = b1;
project(B, rs2s) = b2;
// solve in-place with LAPACK sysv; B is overwritten with the solution
ublas::symmetric_adaptor<ublas::matrix<double>, ublas::lower> SM(M);
lapack::sysv(SM, B);
//lapack::gesv(M, B);
A1coefficients = project(B, r0s);
A2coefficients = project(B, rs2s);
#else//USING_AMPC using complete matrix A
//
// ************ Using reduced system ****************
//
ubmatrix M12 = ublas::zero_matrix<double>(ssize, ssize);
ubvector B1 = ublas::zero_vector<double>(ssize);
// WARNING future optimisation prod(A, temp_type(prod(B,C));
//M12 = C1 - D1 * ublas::prod(C2_inv, D2);
ubmatrix MTemp = ublas::prod(C2_inv, D2);
M12 = C1 - ublas::prod(D1, MTemp);
// B1 = b1 - D1 * (C2_inv * b2);
ubvector VTemp = ublas::prod(C2_inv, b2);
B1 = b1 - ublas::prod(D1, VTemp);
ubmatrix M21 = ublas::zero_matrix<double>(ssize, ssize);
ubvector B2 = ublas::zero_vector<double>(ssize);
//M21 = C2 - D2 * ublas::prod(C1_inv, D1);
MTemp = ublas::prod(C1_inv, D1);
M21 = C2 - ublas::prod(D2, MTemp);
//B2 = b2 - D2 * (C1_inv * b1);
VTemp = ublas::prod(C1_inv, b1);
B2 = b2 - ublas::prod(D2, VTemp);
// lapack::gesv(M12, B1);
// solve symmetric linear system of equations
/* lapack::gesv(M12, B1); */
ublas::symmetric_adaptor<ublas::matrix<double>, ublas::lower> SM12(M12);
lapack::sysv(SM12, B1);
// results
A1coefficients = B1;
// solve symmetric linear system of equations
ublas::symmetric_adaptor<ublas::matrix<double>, ublas::lower> SM21(M21);
lapack::sysv(SM21, B2);
A2coefficients = B2;
/* ubvector VTemp2 = ublas::zero_vector<double>(ssize); // = ublas::prod(D2, A1coefficients); */
/* ublas::axpy_prod(D2, A1coefficients, VTemp2, true); */
/* VTemp = b2 - VTemp2; */
/* //A2coefficients = ublas::prod(C2_inv, VTemp); */
/* ublas::axpy_prod(C2_inv, VTemp, A2coefficients, true); */
// std::cerr << "\n A1 " << A1coefficients << '\n';
/* std::cerr << "\n A2 " << A2coefficients << '\n'; */
#endif//USING_AMPC
#endif//USING_EIGEN
}
//! Bichoutskaia potential.
/*!
*
*As defined in equation A7 of
Lindgren, E. B., Chan, H.-K., Stace, A. J. & Besley, E.
Progress in the theory of electrostatic interactions between charged particles.
Phys. Chem. Chem. Phys. 18, 5883–5895 (2016)
\param rt the reduced separation r/r1.
\param r21 the ratio r2/r1.
\param q21 the ratio q2/q1.
\param A1coefficients the multipole moment coefficients of particle 1.
\param A2coefficients the multipole moment coefficients of particle 2.
\param eps the dielectric constant.
*/
// Multipolar (Bichoutskaia-type) interaction potential assembled from the
// coefficients produced by compute_MPCoefficients().  All quantities are
// in the reduced units of that solver: rt = r/r1, r21 = r2/r1, q21 = q2/q1.
inline
double mpc_potential(const double& rt, const double& r21, const double& q21,
const ubvector& A1coefficients, const ubvector& A2coefficients,
const double& eps) {
unsigned int size = A1coefficients.size();
double invK = 1.0/Kcoul;
// bare monopole-monopole (Coulomb) contribution
double potc = Kcoul*q21/rt;
double epsm = eps-1.0;
double epsp = eps+1.0;
// cross term built from particle 2's multipole coefficients
double pot1 = 0.0;
for(unsigned int mp=1; mp<size+1; ++mp) {
pot1 += A2coefficients(mp-1)*pow(r21/rt, mp+1);
//std::cerr << "\n pot1 " << pot1;
}
//std::cerr << "\n";
// self-polarization term, quadratic in particle 1's coefficients
double pot2 = 0.0;
for(unsigned int lp=1; lp<size+1; ++lp) {
double prefac = (epsp*lp+1)/(epsm*lp);
pot2 += prefac * A1coefficients(lp-1) * A1coefficients(lp-1);
}
// NOTE 1/2 factor correction to potential as stated in
// 1.Stace, A. J. & Bichoutskaia, E. Reply to the ‘Comment on “Treating
// highly charged carbon and fullerene clusters as dielectric particles”’
// by H. Zettergren and H. Cederquist, Phys. Chem. Chem. Phys., 2012, 14,
// DOI: 10.1039/c2cp42883k. Phys. Chem. Chem. Phys. 14, 16771–16772 (2012).
/* std::cerr << '\n' << pot_coul << '\t' << pot_2 << '\t' << pot_3 << '\n'; */
return potc + 0.5*pot1 - 0.5*invK*pot2;
}
//! Bichoutskaia potential.
/*!
*
*As defined in equation A7 of
Lindgren, E. B., Chan, H.-K., Stace, A. J. & Besley, E.
Progress in the theory of electrostatic interactions between charged particles.
Phys. Chem. Chem. Phys. 18, 5883–5895 (2016)
\param r1 radius of particle 1.
\param r2 radius of particle 2.
\param q1 charge of particle 1.
\param q2 charge of particle 2.
\param h separation between particles.
\param eps the dielectric constant.
\param acoefficients the multipole moment coefficients
\
*/
inline
double potential_bichoutskaia(const double r1, const double q1,
const double r2, const double q2,
const double h,
const ubvector& acoefficients,
const double eps) {
// WARNING
// Unimplemented stub: ignores all arguments and always returns 1.0.
// Use mpc_potential() for the actual multipolar potential.
return 1.0;
}
//! Bichoutskaia force.
/*!
*
*As defined in equation 5 of
Lindgren, E. B., Chan, H.-K., Stace, A. J. & Besley, E.
Progress in the theory of electrostatic interactions between charged particles.
Phys. Chem. Chem. Phys. 18, 5883–5895 (2016)
This is the scaled force.
\param A10 Multipolar coefficent 0.
\param A1coefficients Multipolar coefficients
*/
// Scaled multipolar (Bichoutskaia) force, Lindgren et al. PCCP 18,
// 5883-5895 (2016), eq. 5: a weighted sum over products of adjacent-order
// multipole coefficients of particle 1.
inline
double mpc_force(const double& A10,
const ubvector& A1coefficients,
double eps) {
unsigned int size = A1coefficients.size();
double invK = 1.0/Kcoul;
double epsm = eps-1.0;
double epsp = eps+1.0;
double force = 0.0;
// Populate new vector with A1 coefficients
// prepend the monopole term A10 so orders 0..size sit contiguously
ubvector Acoeffs = ublas::zero_vector<double>(size+1);
for(unsigned int m=1; m<size+1; ++m) {
Acoeffs(m) = A1coefficients(m-1);
}
Acoeffs(0) = A10;
for(unsigned int l=0; l<size; ++l) {
double prefac = (epsp*(l+1)+1)/epsm;
force += prefac*Acoeffs(l)*Acoeffs(l+1);
}
return -invK * force;
}
//! Bichoutskaia force.
/*!
* \param r1 radius of particle 1.
\param r2 radius of particle 2.
\param q1 charge of particle 1.
\param q2 charge of particle 2.
\param h separation between particles.
\param eps the dielectric constant.
\param acoefficients the multipole moment coefficients
\
*/
inline
double force_bichoutskaia(const double r1, const double q1,
const double r2, const double q2,
const double h,
const ubvector& acoefficients,
const double eps) {
// WARNING to complete
// Unimplemented stub: ignores all arguments and always returns -1.0.
// Use mpc_force() for the actual multipolar force.
return -1.0;
}
//! Relative error |expval - trueval| / |trueval|.
//! FIX: use std::fabs -- the unqualified `abs` could bind the C
//! `int abs(int)` overload and truncate every sub-unity error to zero.
inline
double relative_error(const double trueval, const double expval){
  return std::fabs((expval-trueval)/trueval);
}
//! Relative difference of a and b, normalized by the smaller value.
//! FIX: use std::fabs (see relative_error) -- unqualified `abs` risks the
//! C int overload, which truncates fractional results to zero.
inline
double max_relative_error(const double a, const double b){
  return std::fabs((a-b)/std::min(a,b));
}
// Functor: binds r21 and q21 so potential_coulomb_fact() can be used as a
// one-argument callable f(rt), e.g. by boost root finders.
struct potential_coulomb_funct
{
potential_coulomb_funct(double r21_, double q21_): r21(r21_), q21(q21_){
}
// evaluate at the reduced separation rt
double operator()(double const& rt) {
return potential_coulomb_fact(rt, r21, q21);
}
double r21;
double q21;
};
// Functor: binds all physical parameters so potential_coulombvdw() can be
// used as a one-argument callable f(r).
struct potential_coulombvdw_funct
{
potential_coulombvdw_funct(double r1_,
double q1_,
double r2_,
double q2_,
double eps_,
double AH_,
double rc_,
double coulomb_=1.0,
double vdw_=1.0):
r1(r1_), q1(q1_),
r2(r2_), q2(q2_),
eps(eps_), AH(AH_),
rc(rc_), coulomb(coulomb_), vdw(vdw_){
}
// evaluate the bound potential at separation r
double operator()(double const& r) {
return potential_coulombvdw(r, r1, q1, r2, q2, eps, AH, rc, coulomb, vdw);
}
double r1;
double q1;
double r2;
double q2;
double eps;
double AH;
double rc;
double coulomb;
double vdw;
};
// Functor: binds all physical parameters so force_coulombvdw() can be
// used as a one-argument callable f(r).
struct force_coulombvdw_funct
{
force_coulombvdw_funct(double r1_,
double q1_,
double r2_,
double q2_,
double eps_,
double AH_,
double rc_,
double coulomb_=1.0,
double vdw_=1.0):
r1(r1_), q1(q1_),
r2(r2_), q2(q2_),
eps(eps_), AH(AH_),
rc(rc_), coulomb(coulomb_), vdw(vdw_){
}
// evaluate the bound force at separation r
double operator()(double const& r) {
return force_coulombvdw(r, r1, q1, r2, q2, eps, AH, rc, coulomb, vdw);
}
double r1;
double q1;
double r2;
double q2;
double eps;
double AH;
double rc;
double coulomb;
double vdw;
};
// Functor: binds r2/r1, q2/q1 and eps so the IPA potential can be used as
// a one-argument callable f(rt), e.g. by boost root finders.
struct potential_ipa_funct
{
  potential_ipa_funct(double ratio_r, double ratio_q, double epsilon):
      r21(ratio_r), q21(ratio_q), eps(epsilon) {
  }
  // evaluate at the reduced separation rt
  double operator()(double const& rt) {
    return potential_ipa_fact(rt, r21, q21, eps);
  }
  double r21;
  double q21;
  double eps;
};
// Functor: binds r2/r1, q2/q1 and eps so the IPA force can be used as a
// one-argument callable f(rt).
struct force_ipa_funct
{
  force_ipa_funct(double ratio_r, double ratio_q, double epsilon):
      r21(ratio_r), q21(ratio_q), eps(epsilon) {
  }
  // evaluate at the reduced separation rt
  double operator()(double const& rt) {
    return force_ipa_fact(rt, r21, q21, eps);
  }
  double r21;
  double q21;
  double eps;
};
// Functor: binds all physical parameters so potential_ipavdw() can be
// used as a one-argument callable f(r).
struct potential_ipavdw_funct
{
potential_ipavdw_funct(double r1_,
double q1_,
double r2_,
double q2_,
double eps_,
double AH_,
double rc_,
double ipa_=1.0,
double vdw_=1.0):
r1(r1_), q1(q1_),
r2(r2_), q2(q2_),
eps(eps_), AH(AH_),
rc(rc_), ipa(ipa_), vdw(vdw_){
}
// evaluate the bound potential at separation r
double operator()(double const& r) {
return potential_ipavdw(r, r1, q1, r2, q2, eps, AH, rc, ipa, vdw);
}
double r1;
double q1;
double r2;
double q2;
double eps;
double AH;
double rc;
double ipa;
double vdw;
};
// Functor: binds all physical parameters so force_ipavdw() can be used as
// a one-argument callable f(r).
struct force_ipavdw_funct
{
force_ipavdw_funct(double r1_,
double q1_,
double r2_,
double q2_,
double eps_,
double AH_,
double rc_,
double ipa_=1.0,
double vdw_=1.0):
r1(r1_), q1(q1_),
r2(r2_), q2(q2_),
eps(eps_), AH(AH_),
rc(rc_), ipa(ipa_), vdw(vdw_){
}
// evaluate the bound force at separation r
double operator()(double const& r) {
return force_ipavdw(r, r1, q1, r2, q2, eps, AH, rc, ipa, vdw);
}
double r1;
double q1;
double r2;
double q2;
double eps;
double AH;
double rc;
double ipa;
double vdw;
};
// multipolar coefficients potential functor
// Evaluates the multipolar-coefficient (MPC, Bichoutskaia-style) potential
// for a pair described by the scaled ratios r21 = r2/r1 and q21 = q2/q1.
struct potential_mpc_funct
{
  potential_mpc_funct(double r21_, double q21_, double eps_,
                      unsigned int nterms_)
    : r21(r21_), q21(q21_), eps(eps_), nterms(nterms_) {
    // system is nterms - 0th term
    unsigned int ssize = nterms - 1;
    // 1, n-1 terms
    A1coefficients = ublas::zero_vector<double>(ssize);
    A2coefficients = ublas::zero_vector<double>(ssize);
    // 0 terms
    A10 = 0.0;
    A20 = 0.0;
  }
  // constructor if coefficients are known
  // BUGFIX: A10_ and A20_ were previously accepted but never stored, leaving
  // A10/A20 uninitialized. They are now assigned.
  // NOTE(review): r21 and q21 are still left unset by this constructor, yet
  // operator() reads them -- callers must assign them before evaluating.
  // TODO confirm intended usage of this overload.
  potential_mpc_funct(const ubvector &A1coefficients_,
                      const ubvector &A2coefficients_,
                      double A10_,
                      double A20_,
                      double eps_,
                      unsigned int nterms_)
    : A1coefficients(A1coefficients_),
      A2coefficients(A2coefficients_),
      A10(A10_), A20(A20_),
      eps(eps_),
      nterms(nterms_) {
  }
  // Evaluate the scaled MPC potential at scaled separation rt (= r/r1).
  // Mutates the coefficient members, so instances are not thread-safe.
  double operator()(double const& rt) {
    // compute coefficients
    compute_MPCoefficients(A1coefficients, A2coefficients,
                           A10, A20, rt, r21, q21, eps, nterms);
    // compute Bichoutskaia potential
    return mpc_potential(rt, r21, q21, A1coefficients, A2coefficients, eps);
  }
  ubvector A1coefficients; // multipolar coefficients, terms 1..nterms-1 (p1)
  ubvector A2coefficients; // multipolar coefficients, terms 1..nterms-1 (p2)
  double A10;              // 0th-order coefficient (p1)
  double A20;              // 0th-order coefficient (p2)
  double r21;              // radius ratio r2/r1
  double q21;              // charge ratio q2/q1
  double eps;              // dielectric constant
  unsigned int nterms;     // number of terms in the multipolar expansion
};
// multipolar coefficients force functor
// Evaluates the multipolar-coefficient (MPC, Bichoutskaia-style) force for a
// pair described by the scaled ratios r21 = r2/r1 and q21 = q2/q1.
struct force_mpc_funct {
  force_mpc_funct(double r21_,
                  double q21_,
                  double eps_,
                  unsigned int nterms_)
    : r21(r21_), q21(q21_), eps(eps_), nterms(nterms_) {
    // system is nterms - 0th term
    unsigned int ssize = nterms - 1;
    // 1, n-1 terms
    A1coefficients = ublas::zero_vector<double>(ssize);
    A2coefficients = ublas::zero_vector<double>(ssize);
    // 0 terms
    A10 = 0.0;
    A20 = 0.0;
  }
  // constructor if coefficients are known
  // BUGFIX: A10_ and A20_ were previously accepted but never stored, leaving
  // A10/A20 uninitialized. They are now assigned.
  // NOTE(review): r21 and q21 are still left unset by this constructor, yet
  // operator() reads them -- callers must assign them before evaluating.
  // TODO confirm intended usage of this overload.
  force_mpc_funct(const ubvector &A1coefficients_,
                  const ubvector &A2coefficients_,
                  double A10_,
                  double A20_,
                  double eps_,
                  unsigned int nterms_)
    : A1coefficients(A1coefficients_),
      A2coefficients(A2coefficients_),
      A10(A10_), A20(A20_),
      eps(eps_),
      nterms(nterms_) {
  }
  // Evaluate the scaled MPC force at scaled separation rt (= r/r1).
  // Mutates the coefficient members, so instances are not thread-safe.
  double operator()(double const& rt) {
    // compute coefficients
    compute_MPCoefficients(A1coefficients, A2coefficients,
                           A10, A20, rt, r21, q21, eps, nterms);
    // compute Bichoutskaia force
    return mpc_force(A10, A1coefficients, eps);
  }
  ubvector A1coefficients; // multipolar coefficients, terms 1..nterms-1 (p1)
  ubvector A2coefficients; // multipolar coefficients, terms 1..nterms-1 (p2)
  double A10;              // 0th-order coefficient (p1)
  double A20;              // 0th-order coefficient (p2)
  double r21;              // radius ratio r2/r1
  double q21;              // charge ratio q2/q1
  double eps;              // dielectric constant
  unsigned int nterms;     // number of terms in the multipolar expansion
#ifdef USING_EIGEN
  /* MatrixDD */
#endif
};
// MPC + vdW
// multipolar coefficients potential functor
// Combines the multipolar-coefficient (MPC) electrostatic potential with a
// van der Waals term for the particle pair (r1,q1)-(r2,q2).
// NOTE(review): operator() mutates members (rt, rs, mpcpot, vdwpot and the
// coefficient vectors), so instances must not be shared across threads.
struct potential_mpcvdw_funct
{
  potential_mpcvdw_funct(double r1_,
                         double q1_,
                         double r2_,
                         double q2_,
                         double eps_,
                         unsigned int nterms_,
                         double AH_,
                         double rc_,
                         double mpc_ = 1.0,
                         double vdw_ = 1.0):
    r1(r1_), q1(q1_),
    r2(r2_), q2(q2_),
    eps(eps_), nterms(nterms_),
    AH(AH_), rc(rc_),
    mpc(mpc_), vdw(vdw_) {
    update_parameters();
  }
  // Derive the scaled quantities (r21, q21, mpcfactor, vdwfactor) from the
  // raw inputs, zero the coefficient buffers, and canonicalize the pair
  // ordering. Call again after changing r1/q1/r2/q2/AH/mpc/vdw.
  void update_parameters() {
    // system is nterms - 0th term
    unsigned int ssize = nterms-1;
    // 1, n-1 terms
    A1coefficients = ublas::zero_vector<double>(ssize);
    A2coefficients = ublas::zero_vector<double>(ssize);
    // 0 terms
    A10 = 0.0;
    A20 = 0.0;
    // swap particles. We want to keep r21 < 1
    if (r2 > r1) {
      std::swap(r1, r2);
      std::swap(q1, q2);
    }
    q21 = 0.0;
    // for neutrals only vdW holds
    // WARNING float comparison
    if ((q1 == 0.0) && (q2 == 0.0)) {
      // both neutral: disable the MPC branch entirely (operator() checks
      // mpc > 0.0)
      mpc = 0.0;
      q21 = 0.0;
    }
    else {
      if (q1 == 0.0) {// p1 is neutral
        // swap so q1 != 0 and q21 (= q2/q1) is well-defined as 0.
        // NOTE(review): this swap can undo the r2 > r1 ordering above, so
        // r21 < 1 is no longer guaranteed in this branch -- confirm intended.
        std::swap(r1, r2);
        std::swap(q1, q2);
        q21 = 0.0;
      }
      else {// q1 != 0 and q2
        q21 = q2/q1;
      }
    }
    r21 = r2 / r1;
    // physical prefactors applied to the dimensionless potentials below
    mpcfactor = mpc * q1 * q1 / r1;
    vdwfactor = vdw * AH / 6.0;
  }
  // // constructor if coefficients are known
  // potential_mpcvdw_funct(const ubvector &A1coefficients_,
  //           const ubvector &A2coefficients_,
  //           double A10_,
  //           double A20_,
  //           double eps_,
  //           unsigned int nterms_):
  //   A1coefficients(A1coefficients_),
  //   A2coefficients(A2coefficients_),
  //   eps(eps_),
  //   nterms(nterms_) {
  // }
  // Evaluate the combined MPC + vdW potential at separation r (unscaled).
  double operator()(double const& r) {
    rt = r / r1;
    mpcpot = 0.0;
    vdwpot = 0.0;
    if (mpc > 0.0) {// mpc + vdw
      // compute coefficients
      compute_MPCoefficients(A1coefficients, A2coefficients,
                             A10, A20, rt, r21, q21, eps, nterms);
      mpcpot = mpcfactor * mpc_potential(rt, r21, q21, A1coefficients, A2coefficients, eps);
    }
    if (vdw > 0.0) {
      // cutoff: below contact + rc the vdW potential is clamped at the
      // cutoff value to avoid the divergence at contact
      if (r > r1+r2+rc){
        rs = r/r1;
      }
      else {
        rs = (r1+r2+rc)/r1;
      }
      vdwpot = vdwfactor * potential_vdw_nodim(rs, r21);
    }
    return mpcpot + vdwpot;
  }
  ubvector A1coefficients; // multipolar coefficients, terms 1..nterms-1 (p1)
  ubvector A2coefficients; // multipolar coefficients, terms 1..nterms-1 (p2)
  double A10;              // 0th-order coefficient (p1)
  double A20;              // 0th-order coefficient (p2)
  double rt;               // scratch: scaled separation r/r1
  double rs;               // scratch: scaled (possibly clamped) vdW separation
  double mpcfactor;        // physical prefactor for the MPC term
  double vdwfactor;        // physical prefactor for the vdW term
  double r1;               // radius of particle 1 (canonicalized)
  double q1;               // charge of particle 1 (canonicalized)
  double r2;               // radius of particle 2 (canonicalized)
  double q2;               // charge of particle 2 (canonicalized)
  double r21;              // radius ratio r2/r1
  double q21;              // charge ratio q2/q1
  double eps;              // dielectric constant
  unsigned int nterms;     // number of terms in the multipolar expansion
  double AH;               // vdW amplitude (presumably Hamaker constant)
  double rc;               // vdW cutoff distance
  double mpc;              // scale/switch for the MPC term
  double vdw;              // scale/switch for the vdW term
  double mpcpot;           // scratch: last MPC contribution
  double vdwpot;           // scratch: last vdW contribution
};
// multipolar coefficients force functor
// Combines the multipolar-coefficient (MPC) electrostatic force with a van
// der Waals term for the particle pair (r1,q1)-(r2,q2).
// NOTE(review): operator() mutates members (rt, rs, mpcforce, vdwforce and
// the coefficient vectors), so instances must not be shared across threads.
struct force_mpcvdw_funct {
  force_mpcvdw_funct(double r1_,
                     double q1_,
                     double r2_,
                     double q2_,
                     double eps_,
                     unsigned int nterms_,
                     double AH_,
                     double rc_,
                     double mpc_ = 1.0,
                     double vdw_ = 1.0):
    r1(r1_), q1(q1_),
    r2(r2_), q2(q2_),
    eps(eps_), nterms(nterms_),
    AH(AH_), rc(rc_),
    mpc(mpc_), vdw(vdw_) {
    update_parameters();
  }
  // Derive the scaled quantities (r21, q21, mpcfactor, vdwfactor) from the
  // raw inputs, zero the coefficient buffers, and canonicalize the pair
  // ordering. Call again after changing r1/q1/r2/q2/AH/mpc/vdw.
  void update_parameters() {
    // system is nterms - 0th term
    unsigned int ssize = nterms-1;
    // 1, n-1 terms
    A1coefficients = ublas::zero_vector<double>(ssize);
    A2coefficients = ublas::zero_vector<double>(ssize);
    // 0 terms
    A10 = 0.0;
    A20 = 0.0;
    // swap particles. We want to keep r21 < 1
    if (r2 > r1) {
      std::swap(r1, r2);
      std::swap(q1, q2);
    }
    q21 = 0.0;
    // for neutrals only vdW holds
    // WARNING float comparison
    if ((q1 == 0.0) && (q2 == 0.0)) {
      // both neutral: disable the MPC branch (operator() checks mpc > 0.0)
      mpc = 0.0;
      q21 = 0.0;
    }
    else {
      if (q1 == 0.0) {// p1 is neutral
        // swap so q1 != 0; note this can undo the r2 > r1 ordering above
        std::swap(r1, r2);
        std::swap(q1, q2);
        q21 = 0.0;
      }
      else {// q1 != 0 and q2
        q21 = q2/q1;
      }
    }
    r21 = r2 / r1;
    // physical prefactors applied to the dimensionless forces below
    mpcfactor = mpc * q1 * q1 / (r1*r1);
    vdwfactor = vdw * AH / (6.0*r1);
  }
  // constructor if coefficients are known
  // BUGFIX: A10_ and A20_ were previously accepted but never stored, leaving
  // A10/A20 uninitialized. They are now assigned.
  // NOTE(review): this overload leaves r1, r21, q21 and the prefactors unset,
  // yet operator() reads them -- callers must assign them before evaluating.
  // TODO confirm intended usage of this overload.
  force_mpcvdw_funct(const ubvector &A1coefficients_,
                     const ubvector &A2coefficients_,
                     double A10_,
                     double A20_,
                     double eps_,
                     unsigned int nterms_):
    A1coefficients(A1coefficients_),
    A2coefficients(A2coefficients_),
    A10(A10_), A20(A20_),
    eps(eps_),
    nterms(nterms_) {
  }
  // Evaluate the combined MPC + vdW force at separation r (unscaled).
  double operator()(double const& r) {
    rt = r / r1;
    mpcforce = 0.0;
    vdwforce = 0.0;
    if (mpc > 0.0) {// mpc + vdw
      // compute coefficients
      compute_MPCoefficients(A1coefficients, A2coefficients,
                             A10, A20, rt, r21, q21, eps, nterms);
      mpcforce = mpcfactor * mpc_force(A10, A1coefficients, eps);
    }
    if (vdw > 0.0) {
      // cutoff: below contact + rc the vdW force is clamped at the cutoff
      // value to avoid the divergence at contact
      if (r > r1+r2+rc){
        rs = r/r1;
      }
      else {
        rs = (r1+r2+rc)/r1;
      }
      vdwforce = vdwfactor * force_vdw_nodim(rs, r21);
    }
    return mpcforce + vdwforce;
  }
  ubvector A1coefficients; // multipolar coefficients, terms 1..nterms-1 (p1)
  ubvector A2coefficients; // multipolar coefficients, terms 1..nterms-1 (p2)
  double A10;              // 0th-order coefficient (p1)
  double A20;              // 0th-order coefficient (p2)
  double rt;               // scratch: scaled separation r/r1
  double rs;               // scratch: scaled (possibly clamped) vdW separation
  double mpcfactor;        // physical prefactor for the MPC term
  double vdwfactor;        // physical prefactor for the vdW term
  double r1;               // radius of particle 1 (canonicalized)
  double q1;               // charge of particle 1 (canonicalized)
  double r2;               // radius of particle 2 (canonicalized)
  double q2;               // charge of particle 2 (canonicalized)
  double r21;              // radius ratio r2/r1
  double q21;              // charge ratio q2/q1
  double eps;              // dielectric constant
  unsigned int nterms;     // number of terms in the multipolar expansion
  double AH;               // vdW amplitude (presumably Hamaker constant)
  double rc;               // vdW cutoff distance
  double mpc;              // scale/switch for the MPC term
  double vdw;              // scale/switch for the vdW term
  double mpcforce;         // scratch: last MPC contribution
  double vdwforce;         // scratch: last vdW contribution
};
// MPC + vdW
// compute eta factor for a pair of particles
inline
double efactor_mpc(double r1, double r2,
double q1, double q2,
const double eps, const double temperature,
const unsigned int nterms) {
// for numerical stability, accuracy and fast convergence we want r21 < 1.0
if(r2>r1){
double raux = r2;
r2 = r1;
r1 = raux;
double qaux = q2;
q2 = q1;
q1 = qaux;
}
double r21 = r2/r1;
// WARNING float comparison
// avoid division by 0
if(q1==0.0){
// q1=0, permute particle 1 with 2
double raux = r2;
r2 = r1;
r1 = raux;
double qaux = q2;
q2 = q1;
q1 = qaux;
r21 = r2/r1;
}
double q21 = q2/q1;
// max rx
double max = 1.0e-5/r1;
// scaled contact radii
double rt = 1.0 + r21;
double min = rt;
// Find scaled potential at contact and nterms
unsigned int curr_nterms = nterms;
unsigned int step_nterms = 10;
unsigned int iter = 0;
unsigned int max_iter = 20;
double pmpc_rt;
double pmpc_rt_prev = 0.0;
while (true) {
potential_mpc_funct pmpcfunct(r21, q21, eps,
curr_nterms);
pmpc_rt = pmpcfunct(rt);
if ((iter>0) && (max_relative_error(pmpc_rt, pmpc_rt_prev)<PotRE)) {
std::cerr << "\n[II] Convergence achieved for N = " << curr_nterms;
break;
}
if (iter>max_iter) {
std::cerr << "\n[EE] Max iterations = " << iter;
std::cerr << "\n[EE] Number of terms = " << curr_nterms;
break;
}
pmpc_rt_prev = pmpc_rt;
curr_nterms += step_nterms;
++iter;
}
// WARNING copy instance
potential_mpc_funct pmpcfunct(r21, q21, eps,
curr_nterms);
double potprefactor = q1*q1*eCharge*eCharge/r1;
// Potential at contact
double phi_rt = potprefactor*pmpc_rt;
if(pmpc_rt>0){
double eta = exp(-phi_rt/(Kboltz*temperature));
std::cerr << "\n[ii] Repulsive phi = " << phi_rt;
/* if(phi_rt < 0){ */
/* std::terminate(); */
/* } */
return eta;
}
else {// potential is negative (attractive) or zero at contact
// force functor
force_mpc_funct forcempcfunct(r21, q21, eps, curr_nterms);
// Force at contact
double forcempc_rt = forcempcfunct(rt);
// Force at r max
double forcempc_max = forcempcfunct(max);
// checks if force is monotonically decreasing [non bracketed]
if(forcempc_rt*forcempc_max >= 0.0){
std::cerr << "\n[ii] Attractive phi = " << phi_rt;
double eta = 1.0 - phi_rt /(Kboltz*temperature);
/* if(phi_rt > 0){ */
/* std::terminate(); */
/* } */
return eta;
}
else {// find maximum, zero of force is bracketed between rt and max
std::cerr << "\n[ii] Mixed phi_rt = " << phi_rt;
std::pair<double, double> pair_pmpc;
boost::uintmax_t bmax_iter = 1000;
tools::eps_tolerance<double> tol(30);
try {
//#pragma omp critical
//{
pair_pmpc = tools::toms748_solve(forcempcfunct, min, max, forcempc_rt, forcempc_max, tol, bmax_iter);
//pair_pmpc = tools::toms748_solve(forcempcfunct, rt, max, tol, max_iter);
// std::cerr << "\n" << rt << "\t" << max << "\t" << forcempcfunct(rt) << "\t" << forcempcfunct(max);
//}
}
catch(const std::exception& exc) {
//pair_pmpc.first = 0.0;
//pair_pmpc.second = 0.0;
std::cerr << '\n' << exc.what() << '\n';
//int nst = 50;
//double st = (max-rt)/(nst-1);
//for(int i=0; i<50; ++i){
// double rr = rt + st*i;
// std::cerr << "\n" << rr << "\t" << pmpcfunct(rr) << "\t" << forcempcfunct(rr);
//}
//std::cerr << '\n';
std::terminate();
//return 1.0 - phi_rt /(Kboltz*temperature);
}
if(max_iter > 990){
std::cerr << "\n ERROR max iter " << max_iter << "\n\n";
std::terminate();
}
double rbarrier = 0.5*(pair_pmpc.first+pair_pmpc.second);
if(rbarrier>=0){
double phimax = potprefactor * pmpcfunct(rbarrier);
double eta = exp(-phimax/(Kboltz*temperature))
*(1.0+(phimax-phi_rt)/(Kboltz*temperature));
return eta;
}
else{
std::cerr << "\n ERROR Negative rbarrier " << rbarrier << '\n';
std::terminate();
}
}
}
return 1.0;
}
// compute ipa enhancement factor for a pair of particles
// WARNING using pmpc instead of pipa
// The barrier location is found by zeroing the IPA force; the potential
// values entering eta are taken from the MPC potential (25 terms).
// Three outcomes, applied in order (later assignments overwrite earlier):
//   - barrier found (or contact attractive): barrier formula
//   - contact attractive AND no barrier found: attractive-limit formula
//   - contact repulsive: Boltzmann suppression
inline
double efactor_ipa2(double r1, double r2,
                    double q1, double q2,
                    double eps, double temperature) {
  double r21 = r2/r1;
  double q21 = q2/q1;
  // check q1==0
  if(fabs(q1)<1.e-200){
    // q1=0, permute particle 1 with 2 so q21 is well-defined (0)
    double raux = r2;
    r2 = r1;
    r1 = raux;
    double qaux = q2;
    q2 = q1;
    q1 = qaux;
    r21 = r2/r1;
    q21 = 0.0;
  }
  // far-field end of the root bracket, in scaled units
  double max = 500.0;
  boost::uintmax_t max_iter = 1000;
  tools::eps_tolerance<double> tol(30);
  // scaled contact separation
  double rt = 1.0 + r21;
  double min = rt;
  // potential function
  potential_ipa_funct pipafunct(r21, q21, eps);
  //double pipa = pipafunct(rt);
  potential_mpc_funct pmpcfunct(r21, q21, eps, 25);
  double pmpc = pmpcfunct(rt);
  // force function
  force_ipa_funct forceipafunct(r21, q21, eps);
  std::pair<double, double> pair_pipa;
  bool failed = false;
  try {
    // pair_pipa = tools::toms748_solve(pipafunct, min, max, tol, max_iter);
    // locate the potential barrier by zeroing the IPA force
    pair_pipa = tools::toms748_solve(forceipafunct, min, max, tol, max_iter);
  }
  catch(const std::exception& e) {
    // no bracketed root: treat as "no barrier"
    failed = true;
    pair_pipa.first = 0.0;
    // std::cout << "\nMessage from thrown exception was:\n   " << e.what() << std::endl;
    // std::cout << "\nmax_iter = " << max_iter;
  }
  bool attcontact = (pmpc<0.0? true: false);
  bool fullatt = attcontact && failed;
  bool withphimax = !failed || attcontact;
  double potprefactor = q1*q1*eCharge*eCharge/r1;
  double eta = 0.0;
  double phimin = potprefactor*pmpc;
  if(withphimax){
    // double phimax = potprefactor*pair_pipa.first;
    double phimax = 0.0;
    if (pair_pipa.first>0.0){
      // NOTE could be better midpoint
      // pair_pipa.first + (pair_pipa.second - pair_pipa.first)/2;
      // evaluate barrier height with the MPC potential at the bracket start
      phimax = potprefactor*pmpcfunct(pair_pipa.first);
    }
    eta = exp(-phimax/(Kboltz*temperature))
        *(1.0+(phimax-phimin)/(Kboltz*temperature));
    // std::cout << std::endl
    //           << "\t pipa.first = " << pair_pipa.first
    //           << "\t pipa.second = " << pair_pipa.second
    //           << "\t max_iter = " << max_iter;
  }
  if(fullatt){
    // attractive everywhere: linear attractive-limit enhancement
    eta = 1.0 - phimin /(Kboltz*temperature);
  }
  if(!attcontact){
    // repulsive at contact: Boltzmann suppression (overwrites the above)
    eta = exp(-phimin/(Kboltz*temperature));
  }
  // std::cout << std::endl << "r21 = " << r21 << "\tq21 = " << q21
  //           << "\t phimax = " << potprefactor*pair_pipa.first
  //           << "\t phimin = " << phimin << "\tcontact = " << attcontact
  //           << "\tfullatt = " << fullatt << "\teta = " << eta;
  return eta;
}
// compute ipa enhancement factor for a pair of particles
// WARNING using pmpc instead of pipa
// we use ipa force to find the location of the barrier if
// the mpc potential is attractive at contact
// then we compute the enhancement factor using the mpc potential
// at contact and at the barrier determined by zeroing the ipa force
inline
double efactor_ipa(double r1, double r2,
                   double q1, double q2,
                   const double eps, const double temperature,
                   const unsigned int nterms) {
  // NOTE(review): unlike efactor_mpc, the swap only triggers when
  // r2 > 10*r1, so r21 may exceed 1 here -- confirm this is intended.
  if(r2>10.0*r1){
    double raux = r2;
    r2 = r1;
    r1 = raux;
    double qaux = q2;
    q2 = q1;
    q1 = qaux;
  }
  double r21 = r2/r1;
  double q21 = q2/q1;
  // WARNING float comparison
  if(q1==0.0){
    // q1=0, permute particle 1 with 2 so q21 is well-defined (0)
    double raux = r2;
    r2 = r1;
    r1 = raux;
    double qaux = q2;
    q2 = q1;
    q1 = qaux;
    r21 = r2/r1;
    q21 = 0.0;
  }
  // far-field bracket end in units of r1
  // (assumes 1.0e-5 is an absolute distance; TODO confirm units)
  double max = 1.0e-5/r1;
  // scaled contact separation
  double rt = 1.0 + r21;
  double min = rt;
  // // ipa functor
  // potential_ipa_funct pipafunct(r21, q21, eps);
  // // ipa at contact
  // double pipa_rt = pipafunct(rt)
  // mpc functor
  potential_mpc_funct pmpcfunct(r21, q21, eps, nterms);
  // mpc at contact
  double pmpc_rt = pmpcfunct(rt);
  // potential prefactor
  double potprefactor = q1*q1*eCharge*eCharge/r1;
  // Potential at contact in mks
  double phi_rt = potprefactor*pmpc_rt;
  // repulsive potential: Boltzmann suppression
  if(pmpc_rt>0){
    double eta = exp(-phi_rt/(Kboltz*temperature));
    /* if(phi_rt < 0){ */
    /*   std::terminate(); */
    /* } */
    return eta;
  }
  else {// potential is negative (attractive) or zero at contact
    // force functor
    force_ipa_funct forceipafunct(r21, q21, eps);
    // Force at contact
    double forceipa_rt = forceipafunct(rt);
    // Force at r max
    double forceipa_max = forceipafunct(max);
    // checks if force is monotonically decreasing [non bracketed]
    if(forceipa_rt*forceipa_max >= 0.0){
      // attractive everywhere: linear attractive-limit enhancement
      double eta = 1.0 - phi_rt /(Kboltz*temperature);
      /* if(phi_rt > 0){ */
      /*   std::terminate(); */
      /* } */
      return eta;
    }
    else {// find maximum, zero of force is bracketed between rt and max
      std::pair<double, double> pair_pipa;
      boost::uintmax_t max_iter = 1000;
      tools::eps_tolerance<double> tol(30);
      try {
        //#pragma omp critical
        //{
        pair_pipa = tools::toms748_solve(forceipafunct, min, max, forceipa_rt, forceipa_max, tol, max_iter);
        //pair_pipa = tools::toms748_solve(forcepipafunct, rt, max, tol, max_iter);
        //  std::cerr << "\n" << rt << "\t" << max << "\t" <<
        //}
      }
      catch(const std::exception& exc) {
        std::cerr << '\n' << exc.what() << '\n';
        std::terminate();
      }
      // toms748_solve reports iterations actually used through max_iter
      if(max_iter > 990){
        std::cerr << "\n ERROR max iter " << max_iter << "\n\n";
        std::terminate();
      }
      // barrier location = midpoint of the final bracket
      double rbarrier = 0.5*(pair_pipa.first+pair_pipa.second);
      if(rbarrier>=0){
        // barrier height from the MPC potential; eta from barrier formula
        double phimax = potprefactor * pmpcfunct(rbarrier);
        double eta = exp(-phimax/(Kboltz*temperature))
            *(1.0+(phimax-phi_rt)/(Kboltz*temperature));
        return eta;
      }
      else{
        std::cerr << "\n ERROR Negative rbarrier " << rbarrier << '\n';
        std::terminate();
      }
    }
  }
  return 1.0;
}
}
#endif//EINT_H
|
mttkrp.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "mttkrp.h"
#include "thd_info.h"
#include "tile.h"
#include "util.h"
#include "mutex_pool.h"
/* XXX: this is a memory leak until cpd_ws is added/freed. */
static mutex_pool * pool = NULL;
/**
* @brief Function pointer that performs MTTKRP on a tile of a CSF tree.
*
* @param ct The CSF tensor.
* @param tile_id The tile to process.
* @param mats The matrices.
* @param mode The output mode.
* @param thds Thread structures.
* @param partition A partitioning of the slices in the tensor, to distribute
* to threads. Use the thread ID to decide which slices to
* process. This may be NULL, in that case simply process all
* slices.
*/
typedef void (* csf_mttkrp_func)(
splatt_csf const * const ct,
idx_t const tile_id,
matrix_t ** mats,
idx_t const mode,
thd_info * const thds,
idx_t const * const partition);
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Reduce thread-local MTTKRP buffers into the global output matrix.
*        Must be called from inside an OpenMP parallel region: each thread
*        reduces its own contiguous chunk of the output, so the work is
*        partitioned without locks.
*
* @param ws MTTKRP workspace holding ws->privatize_buffer[t] per thread.
* @param global_output The global MTTKRP output we are reducing into.
* @param nrows The number of rows in the MTTKRP.
* @param ncols The number of columns in the MTTKRP.
*/
static void p_reduce_privatized(
    splatt_mttkrp_ws * const ws,
    val_t * const restrict global_output,
    idx_t const nrows,
    idx_t const ncols)
{
  /* Ensure everyone has completed their local MTTKRP. */
  #pragma omp barrier

  sp_timer_t reduction_timer;
  timer_fstart(&reduction_timer);

  int const tid = splatt_omp_get_thread_num();
  idx_t const num_threads = splatt_omp_get_num_threads();

  /* Contiguous chunk owned by this thread; the last thread also absorbs the
   * remainder when the element count does not divide evenly. */
  idx_t const total_elems = nrows * ncols;
  idx_t const elem_per_thread = total_elems / num_threads;
  idx_t const start = tid * elem_per_thread;
  idx_t const stop = ((idx_t)tid == num_threads - 1)
      ? total_elems
      : start + elem_per_thread;

  /* Accumulate every thread's private buffer into the global output. */
  for(idx_t t = 0; t < num_threads; ++t) {
    val_t const * const restrict thread_buf = ws->privatize_buffer[t];
    for(idx_t x = start; x < stop; ++x) {
      global_output[x] += thread_buf[x];
    }
  }

  timer_stop(&reduction_timer);
  #pragma omp master
  ws->reduction_time = reduction_timer.seconds;
}
/**
* @brief Map MTTKRP functions onto a (possibly tiled) CSF tensor. This function
*        will handle any scheduling required with a partially tiled tensor.
*        Opens its own OpenMP parallel region. Each thread gets a private copy
*        of the output-matrix *struct* (not the factor data) so the output
*        pointer can be redirected to a privatized buffer without races.
*
* @param tensors An array of CSF representations. tensors[csf_id] is processed.
* @param csf_id Which tensor are we processing?
* @param atomic_func An MTTKRP function which atomically updates the output.
* @param nosync_func An MTTKRP function which does not atomically update.
* @param mats The matrices, with the output stored in mats[MAX_NMODES].
* @param mode Which mode of 'tensors' is the output (not CSF depth).
* @param thds Thread structures.
* @param ws MTTKRP workspace.
*/
static void p_schedule_tiles(
    splatt_csf const * const tensors,
    idx_t const csf_id,
    csf_mttkrp_func atomic_func,
    csf_mttkrp_func nosync_func,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    splatt_mttkrp_ws * const ws)
{
  splatt_csf const * const csf = &(tensors[csf_id]);
  idx_t const nmodes = csf->nmodes;
  idx_t const depth = nmodes - 1;

  idx_t const nrows = mats[mode]->I;
  idx_t const ncols = mats[mode]->J;

  /* Store old pointer -- restored after the parallel region in case a
   * privatized buffer was swapped in. */
  val_t * const restrict global_output = mats[MAX_NMODES]->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    timer_start(&thds[tid].ttime);

    idx_t const * const tile_partition = ws->tile_partition[csf_id];
    idx_t const * const tree_partition = ws->tree_partition[csf_id];

    /*
     * We may need to edit mats[MAX_NMODES]->vals, so create a private copy of
     * the pointers to edit. (NOT actual factors).
     */
    matrix_t * mats_priv[MAX_NMODES+1];
    for(idx_t m=0; m < MAX_NMODES; ++m) {
      mats_priv[m] = mats[m];
    }
    /* each thread gets separate structure, but do a shallow copy */
    mats_priv[MAX_NMODES] = splatt_malloc(sizeof(**mats_priv));
    *(mats_priv[MAX_NMODES]) = *(mats[MAX_NMODES]);

    /* Give each thread its own private buffer and overwrite atomic
     * function. */
    if(ws->is_privatized[mode]) {
      /* change (thread-private!) output structure */
      memset(ws->privatize_buffer[tid], 0,
          nrows * ncols * sizeof(**(ws->privatize_buffer)));
      mats_priv[MAX_NMODES]->vals = ws->privatize_buffer[tid];

      /* Don't use atomics if we privatized. */
      atomic_func = nosync_func;
    }

    /*
     * Distribute tiles to threads in some fashion.
     */
    if(csf->ntiles > 1) {
      /* We parallelize across tiles, and thus should not distribute within a
       * tree. This may change if we instead 'split' tiles across a few
       * threads. */
      assert(tree_partition == NULL);

      /* mode is actually tiled -- avoid synchronization */
      if(csf->tile_dims[mode] > 1) {
        idx_t tile_id = 0;

        /* foreach layer of tiles -- layers along the output mode are disjoint
         * in the output, so each layer can be processed without atomics */
        #pragma omp for schedule(dynamic, 1) nowait
        for(idx_t t=0; t < csf->tile_dims[mode]; ++t) {
          tile_id =
              get_next_tileid(TILE_BEGIN, csf->tile_dims, nmodes, mode, t);
          while(tile_id != TILE_END) {
            nosync_func(csf, tile_id, mats_priv, mode, thds, tree_partition);

            tile_id =
                get_next_tileid(tile_id, csf->tile_dims, nmodes, mode, t);
          }
        }

      /* tiled, but not this mode. Atomics are still necessary. */
      } else {
        for(idx_t tile_id = tile_partition[tid];
                  tile_id < tile_partition[tid+1]; ++tile_id) {
          atomic_func(csf, tile_id, mats_priv, mode, thds, tree_partition);
        }
      }

    /*
     * Untiled, parallelize within kernel.
     */
    } else {
      assert(tree_partition != NULL);
      atomic_func(csf, 0, mats_priv, mode, thds, tree_partition);
    }

    timer_stop(&thds[tid].ttime);

    /* If we used privatization, perform a reduction. */
    if(ws->is_privatized[mode]) {
      p_reduce_privatized(ws, global_output, nrows, ncols);
    }

    splatt_free(mats_priv[MAX_NMODES]);
  } /* end omp parallel */

  /* restore pointer */
  mats[MAX_NMODES]->vals = global_output;
}
/**
* @brief Should a certain mode should be privatized to avoid locks?
*
* @param csf The tensor (just used for dimensions).
* @param mode The mode we are processing.
* @param opts Options, storing the # threads and the threshold.
*
* @return true, if we should privatize.
*/
static bool p_is_privatized(
    splatt_csf const * const csf,
    idx_t const  mode,
    double const * const opts)
{
  idx_t const length = csf->dims[mode];
  idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
  double const thresh = opts[SPLATT_OPTION_PRIVTHRESH];

  /* don't bother if it is not multithreaded. */
  if(nthreads == 1) {
    return false;
  }

  /* Privatize when the replicated output (length rows per thread) is small
   * relative to the nonzero count. BUGFIX: multiply in double instead of
   * idx_t so 'length * nthreads' cannot overflow before the comparison. */
  return ((double) length * (double) nthreads) <= (thresh * (double) csf->nnz);
}
/* out[f] += a[f] * b[f] for all factors, then zero 'a' so it can be reused
 * as an accumulation buffer. 'out' and 'a' must not alias (restrict). */
static inline void p_add_hada_clear(
  val_t * const restrict out,
  val_t * const restrict a,
  val_t const * const restrict b,
  idx_t const nfactors)
{
  /* accumulate the Hadamard product */
  for(idx_t f = 0; f < nfactors; ++f) {
    out[f] += a[f] * b[f];
  }
  /* reset the accumulation buffer */
  for(idx_t f = 0; f < nfactors; ++f) {
    a[f] = 0;
  }
}
/* out[f] = a[f] * b[f] for all factors (element-wise Hadamard product). */
static inline void p_assign_hada(
  val_t * const restrict out,
  val_t const * const restrict a,
  val_t const * const restrict b,
  idx_t const nfactors)
{
  for(idx_t f = 0; f < nfactors; ++f) {
    out[f] = a[f] * b[f];
  }
}
/* Scatter a fiber's accumulated row into the leaf factor matrix, guarding
 * each destination row with its mutex from the global pool. */
static inline void p_csf_process_fiber_locked(
  val_t * const leafmat,
  val_t const * const restrict accumbuf,
  idx_t const nfactors,
  idx_t const start,
  idx_t const end,
  idx_t const * const restrict inds,
  val_t const * const restrict vals)
{
  for(idx_t nnz = start; nnz < end; ++nnz) {
    idx_t const row_id = inds[nnz];
    val_t * const restrict outrow = leafmat + (row_id * nfactors);
    val_t const scale = vals[nnz];

    mutex_set_lock(pool, row_id);
    for(idx_t f = 0; f < nfactors; ++f) {
      outrow[f] += scale * accumbuf[f];
    }
    mutex_unset_lock(pool, row_id);
  }
}
/* Scatter a fiber's accumulated row into the leaf factor matrix without any
 * synchronization -- the caller must guarantee exclusive access. */
static inline void p_csf_process_fiber_nolock(
  val_t * const leafmat,
  val_t const * const restrict accumbuf,
  idx_t const nfactors,
  idx_t const start,
  idx_t const end,
  idx_t const * const restrict inds,
  val_t const * const restrict vals)
{
  for(idx_t nnz = start; nnz < end; ++nnz) {
    val_t * const restrict outrow = leafmat + (inds[nnz] * nfactors);
    val_t const scale = vals[nnz];

    for(idx_t f = 0; f < nfactors; ++f) {
      outrow[f] += scale * accumbuf[f];
    }
  }
}
/* Gather a fiber into the accumulation buffer:
 *   accumbuf[f] += vals[j] * leafmat_row(inds[j])[f]  for each nnz j. */
static inline void p_csf_process_fiber(
  val_t * const restrict accumbuf,
  idx_t const nfactors,
  val_t const * const leafmat,
  idx_t const start,
  idx_t const end,
  idx_t const * const inds,
  val_t const * const vals)
{
  /* foreach nnz in fiber */
  for(idx_t nnz = start; nnz < end; ++nnz) {
    val_t const scale = vals[nnz];
    val_t const * const restrict inrow = leafmat + (nfactors * inds[nnz]);

    for(idx_t f = 0; f < nfactors; ++f) {
      accumbuf[f] += scale * inrow[f];
    }
  }
}
/**
* @brief Depth-first traversal of one CSF subtree rooted at
*        (init_depth, init_idx): fibers at the last internal depth are
*        gathered into per-depth buffers, then partial Hadamard products are
*        propagated toward the root. On return, out[0..nfactors) holds the
*        accumulated row for the subtree root.
*
* @param out Output row (nfactors wide).
* @param buf Per-depth accumulation buffers (each nfactors wide).
* @param idxstack Scratch stack of node indices, one entry per depth.
* @param init_depth Depth of the subtree root; must be < nmodes-1.
* @param init_idx Index of the subtree root within its depth.
* @param fp Fiber pointers per depth (CSR-like offsets into the next depth).
* @param fids Fiber ids per depth (rows of the factor matrices).
* @param vals Nonzero values of the tensor.
* @param mvals Factor-matrix value arrays, one per depth.
* @param nmodes Number of tensor modes.
* @param nfactors Rank of the factorization.
*/
static inline void p_propagate_up(
  val_t * const out,
  val_t * const * const buf,
  idx_t * const restrict idxstack,
  idx_t const init_depth,
  idx_t const init_idx,
  idx_t const * const * const fp,
  idx_t const * const * const fids,
  val_t const * const restrict vals,
  val_t ** mvals,
  idx_t const nmodes,
  idx_t const nfactors)
{
  /* push initial idx initialize idxstack */
  idxstack[init_depth] = init_idx;
  for(idx_t m=init_depth+1; m < nmodes; ++m) {
    idxstack[m] = fp[m-1][idxstack[m-1]];
  }

  assert(init_depth < nmodes-1);

  /* clear out accumulation buffer */
  for(idx_t f=0; f < nfactors; ++f) {
    buf[init_depth+1][f] = 0;
  }

  /* loop until every child of the subtree root has been consumed */
  while(idxstack[init_depth+1] < fp[init_depth][init_idx+1]) {
    /* skip to last internal mode */
    idx_t depth = nmodes - 2;

    /* process all nonzeros [start, end) into buf[depth]*/
    idx_t const start = fp[depth][idxstack[depth]];
    idx_t const end   = fp[depth][idxstack[depth]+1];
    p_csf_process_fiber(buf[depth+1], nfactors, mvals[depth+1],
        start, end, fids[depth+1], vals);

    idxstack[depth+1] = end;

    /* exit early if there is no propagation to do... */
    if(init_depth == nmodes-2) {
      for(idx_t f=0; f < nfactors; ++f) {
        out[f] = buf[depth+1][f];
      }
      return;
    }

    /* Propagate up until we reach a node with more children to process */
    do {
      /* propagate result up and clear buffer for next sibling */
      val_t const * const restrict fibrow
          = mvals[depth] + (fids[depth][idxstack[depth]] * nfactors);
      p_add_hada_clear(buf[depth], buf[depth+1], fibrow, nfactors);

      ++idxstack[depth];
      --depth;
    } while(depth > init_depth &&
        idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
  } /* end DFS */

  /* copy to out */
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] = buf[init_depth+1][f];
  }
}
/* MTTKRP for a 3-mode CSF tile when the output mode is the CSF root.
 * No synchronization on the output: the caller must guarantee that each
 * slice is written by at most one thread (via 'partition' or mode tiling).
 * Per slice: accumF gathers each fiber's inner products with the leaf
 * factor, writeF accumulates the slice's output row, and the row is
 * flushed to ovals once per slice. */
static void p_csf_mttkrp_root3_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  /* CSF structure: slice ptrs/ids (depth 0), fiber ptrs/ids (depth 1),
   * nonzero column ids (depth 2) */
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* factor matrices for depths 1 and 2; output indexed by root (slice) ids */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* write to output */
  val_t * const restrict writeF = (val_t *) thds[tid].scratch[2];
  for(idx_t r=0; r < nfactors; ++r) {
    writeF[r] = 0.;
  }

  /* break up loop by partition; NULL partition means process all slices */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop  = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    /* sids == NULL means slice ids are dense (identity) */
    idx_t const fid = (sids == NULL) ? s : sids[s];
    val_t * const restrict mv = ovals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst  = fptr[f];
      val_t const vfirst   = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* scale inner products by row of A and update to M */
      val_t const * const restrict av = avals  + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        writeF[r] += accumF[r] * av[r];
      }
    } /* foreach fiber */

    /* flush to output (and reset writeF for the next slice) */
    for(idx_t r=0; r < nfactors; ++r) {
      mv[r] += writeF[r];
      writeF[r] = 0.;
    }
  } /* foreach slice (tree) */
}
/* MTTKRP for a 3-mode CSF tile when the output mode is the CSF root.
 * Same computation as p_csf_mttkrp_root3_nolock, but the per-slice flush to
 * the output row is guarded by a mutex from the global pool, so multiple
 * threads may update the same output rows. */
static void p_csf_mttkrp_root3_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  /* CSF structure: slice ptrs/ids (depth 0), fiber ptrs/ids (depth 1),
   * nonzero column ids (depth 2) */
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* factor matrices for depths 1 and 2; output indexed by root (slice) ids */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* write to output */
  val_t * const restrict writeF = (val_t *) thds[tid].scratch[2];
  for(idx_t r=0; r < nfactors; ++r) {
    writeF[r] = 0.;
  }

  /* break up loop by partition; NULL partition means process all slices */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop  = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst  = fptr[f];
      val_t const vfirst   = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* scale inner products by row of A and update to M */
      val_t const * const restrict av = avals  + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        writeF[r] += accumF[r] * av[r];
      }
    }

    /* sids == NULL means slice ids are dense (identity) */
    idx_t const fid = (sids == NULL) ? s : sids[s];
    val_t * const restrict mv = ovals + (fid * nfactors);

    /* flush to output under the row's lock; reset writeF for next slice */
    mutex_set_lock(pool, fid);
    for(idx_t r=0; r < nfactors; ++r) {
      mv[r] += writeF[r];
      writeF[r] = 0.;
    }
    mutex_unset_lock(pool, fid);
  }
}
/* MTTKRP for a 3-mode CSF tile when the output mode is the CSF internal
 * (middle) depth. Each fiber's inner products with the leaf factor are
 * scaled by the root row and written to the fiber's output row under a
 * mutex, since fibers mapping to the same output row may be processed by
 * different threads. */
static void p_csf_mttkrp_intl3_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  /* CSF structure: slice ptrs/ids (depth 0), fiber ptrs/ids (depth 1),
   * nonzero column ids (depth 2) */
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* factor matrices for depths 0 and 2; output indexed by fiber ids */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* break up loop by partition; NULL partition means process all slices */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop  = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    /* sids == NULL means slice ids are dense (identity) */
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst  = fptr[f];
      val_t const vfirst   = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* write to fiber row under its lock */
      val_t * const restrict ov = ovals  + (fids[f] * nfactors);
      mutex_set_lock(pool, fids[f]);
      for(idx_t r=0; r < nfactors; ++r) {
        ov[r] += rv[r] * accumF[r];
      }
      mutex_unset_lock(pool, fids[f]);
    }
  }
}
static void p_csf_mttkrp_leaf3_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);

  /* CSF tile structure: slices -> fibers -> nonzeros */
  val_t const * const tvals = ct->pt[tile_id].vals;
  idx_t const * const restrict slice_ptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fib_ptr   = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict slice_ids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fib_ids   = ct->pt[tile_id].fids[1];
  idx_t const * const restrict nnz_ids   = ct->pt[tile_id].fids[2];

  /* factor matrices at depths 0 and 1; output rows are indexed by the
   * leaf (depth-2) nonzero ids */
  val_t const * const rootmat = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const midmat  = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t * const outmat = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accum = (val_t *) thds[tid].scratch[0];

  /* this thread's range of slices */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const first = (partition == NULL) ? 0       : partition[tid];
  idx_t const last  = (partition == NULL) ? nslices : partition[tid+1];

  for(idx_t slice = first; slice < last; ++slice) {
    idx_t const root_id = (slice_ids == NULL) ? slice : slice_ids[slice];
    val_t const * const restrict rootrow = rootmat + (root_id * nfactors);

    /* foreach fiber in this slice */
    for(idx_t fib = slice_ptr[slice]; fib < slice_ptr[slice+1]; ++fib) {
      /* Hadamard product of the root row and this fiber's mid-level row */
      val_t const * const restrict midrow = midmat + (fib_ids[fib] * nfactors);
      for(idx_t r = 0; r < nfactors; ++r) {
        accum[r] = rootrow[r] * midrow[r];
      }

      /* scatter v * accum into each nonzero's output row; lock the row
       * since other threads may write the same leaf index */
      for(idx_t nz = fib_ptr[fib]; nz < fib_ptr[fib+1]; ++nz) {
        val_t const v = tvals[nz];
        val_t * const restrict outrow = outmat + (nnz_ids[nz] * nfactors);
        mutex_set_lock(pool, nnz_ids[nz]);
        for(idx_t r = 0; r < nfactors; ++r) {
          outrow[r] += v * accum[r];
        }
        mutex_unset_lock(pool, nnz_ids[nz]);
      }
    }
  }
}
/* MTTKRP for CSF tensors with nmodes > 3 where the output mode is the root
 * of the tree. This is the "nolock" variant: it is selected by
 * p_schedule_tiles when the tiling/partitioning guarantees that no two
 * threads update the same output rows concurrently, so no mutex is taken
 * around the output accumulation. (The previous version still locked,
 * which made it behaviorally identical to p_csf_mttkrp_root_locked and
 * defeated the purpose of the lock-free scheduling path; every other
 * *_nolock kernel in this file omits the locks.) */
static void p_csf_mttkrp_root_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }

  /* use the specialized 3-mode kernel when possible */
  if(nmodes == 3) {
    p_csf_mttkrp_root3_nolock(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfibs = ct->pt[tile_id].nfibs[0];
  assert(nfibs <= mats[MAX_NMODES]->I);

  /* break up loop by partition */
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nfibs;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    assert(fid < mats[MAX_NMODES]->I);

    /* accumulate the entire subtree rooted at slice s into buf[0] */
    p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids,
        vals, mvals, nmodes, nfactors);

    /* no lock needed: this thread exclusively owns output row fid */
    val_t * const restrict orow = ovals + (fid * nfactors);
    val_t const * const restrict obuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      orow[f] += obuf[f];
    }
  } /* end foreach outer slice */
}
/* MTTKRP for CSF tensors with nmodes > 3 where the output mode is the root
 * of the tree. "Locked" variant: concurrent threads may update the same
 * output rows, so each row update is guarded by a mutex from the pool. */
static void p_csf_mttkrp_root_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }

  /* use the specialized 3-mode kernel when possible */
  if(nmodes == 3) {
    p_csf_mttkrp_root3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* per-depth factor rows and accumulation buffers for the tree walk */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfibs = ct->pt[tile_id].nfibs[0];
  assert(nfibs <= mats[MAX_NMODES]->I);

  /* break up loop by partition (or process all slices if none given) */
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nfibs;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    assert(fid < mats[MAX_NMODES]->I);

    /* accumulate the entire subtree rooted at slice s into buf[0] */
    p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids,
        vals, mvals, nmodes, nfactors);

    /* add the accumulated row into the output under a per-row lock */
    val_t * const restrict orow = ovals + (fid * nfactors);
    val_t const * const restrict obuf = buf[0];
    mutex_set_lock(pool, fid);
    for(idx_t f=0; f < nfactors; ++f) {
      orow[f] += obuf[f];
    }
    mutex_unset_lock(pool, fid);
  } /* end foreach outer slice */
}
static void p_csf_mttkrp_leaf3_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  assert(ct->nmodes == 3);

  /* CSF tile structure: slices -> fibers -> nonzeros */
  val_t const * const tvals = ct->pt[tile_id].vals;
  idx_t const * const restrict slice_ptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fib_ptr   = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict slice_ids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fib_ids   = ct->pt[tile_id].fids[1];
  idx_t const * const restrict nnz_ids   = ct->pt[tile_id].fids[2];

  /* factor matrices at depths 0 and 1; output rows are indexed by the
   * leaf (depth-2) nonzero ids */
  val_t const * const rootmat = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const midmat  = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t * const outmat = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accum = (val_t *) thds[tid].scratch[0];

  /* this thread's range of slices */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const first = (partition == NULL) ? 0       : partition[tid];
  idx_t const last  = (partition == NULL) ? nslices : partition[tid+1];

  for(idx_t slice = first; slice < last; ++slice) {
    idx_t const root_id = (slice_ids == NULL) ? slice : slice_ids[slice];
    val_t const * const restrict rootrow = rootmat + (root_id * nfactors);

    /* foreach fiber in this slice */
    for(idx_t fib = slice_ptr[slice]; fib < slice_ptr[slice+1]; ++fib) {
      /* Hadamard product of the root row and this fiber's mid-level row */
      val_t const * const restrict midrow = midmat + (fib_ids[fib] * nfactors);
      for(idx_t r = 0; r < nfactors; ++r) {
        accum[r] = rootrow[r] * midrow[r];
      }

      /* scatter v * accum into each nonzero's output row; no locking --
       * the scheduler guarantees exclusive access to these rows */
      for(idx_t nz = fib_ptr[fib]; nz < fib_ptr[fib+1]; ++nz) {
        val_t const v = tvals[nz];
        val_t * const restrict outrow = outmat + (nnz_ids[nz] * nfactors);
        for(idx_t r = 0; r < nfactors; ++r) {
          outrow[r] += v * accum[r];
        }
      }
    }
  }
}
/* MTTKRP for CSF tensors with nmodes > 3 where the output mode is the leaf
 * of the tree. Walks each outer slice as a DFS, propagating Hadamard
 * products of factor rows down the tree and scattering into the output at
 * the nonzero level. "Nolock" variant: output rows are written without
 * synchronization (the tile scheduler must guarantee exclusive access). */
static void p_csf_mttkrp_leaf_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const nmodes = ct->nmodes;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  /* use the specialized 3-mode kernel when possible */
  if(nmodes == 3) {
    p_csf_mttkrp_leaf3_nolock(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  /* extract tensor structures */
  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* per-depth factor rows, Hadamard buffers, and the DFS position stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
  }

  /* foreach outer slice (restricted to this thread's partition) */
  idx_t const nouter = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nouter;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    idxstack[0] = s;
    /* clear out stale data: point each level at the first child of its
     * parent's current position */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* first buf will always just be a matrix row */
    val_t const * const rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }

    idx_t depth = 0;
    idx_t const outer_end = fp[0][s+1];
    /* DFS over the subtree of slice s */
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propogate buf down: Hadamard the parent buffer with this
         * node's factor row */
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* process all nonzeros [start, end) -- no locks on the output */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_nolock(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);

      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}
/* MTTKRP for CSF tensors with nmodes > 3 where the output mode is the leaf
 * of the tree. Same DFS as p_csf_mttkrp_leaf_nolock, but the per-nonzero
 * output updates go through p_csf_process_fiber_locked, which serializes
 * writes to shared output rows. */
static void p_csf_mttkrp_leaf_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  /* extract tensor structures */
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const nmodes = ct->nmodes;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  /* use the specialized 3-mode kernel when possible */
  if(nmodes == 3) {
    p_csf_mttkrp_leaf3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* per-depth factor rows, Hadamard buffers, and the DFS position stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
  }

  /* foreach outer slice (restricted to this thread's partition) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    idxstack[0] = s;
    /* clear out stale data: point each level at the first child of its
     * parent's current position */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* first buf will always just be a matrix row */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }

    idx_t depth = 0;
    idx_t const outer_end = fp[0][s+1];
    /* DFS over the subtree of slice s */
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propogate buf down: Hadamard the parent buffer with this
         * node's factor row */
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* process all nonzeros [start, end) with locked output updates */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_locked(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);

      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}
static void p_csf_mttkrp_intl3_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  assert(ct->nmodes == 3);

  /* CSF tile structure: slices -> fibers -> nonzeros */
  val_t const * const tvals = ct->pt[tile_id].vals;
  idx_t const * const restrict slice_ptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fib_ptr   = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict slice_ids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fib_ids   = ct->pt[tile_id].fids[1];
  idx_t const * const restrict nnz_ids   = ct->pt[tile_id].fids[2];

  /* factor matrices at the root (depth 0) and leaf (depth 2) of the tree;
   * output rows are indexed by the internal (depth-1) fiber ids */
  val_t const * const rootmat = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const leafmat = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const outmat = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accum = (val_t *) thds[tid].scratch[0];

  /* this thread's range of slices */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const first = (partition == NULL) ? 0       : partition[tid];
  idx_t const last  = (partition == NULL) ? nslices : partition[tid+1];

  for(idx_t slice = first; slice < last; ++slice) {
    idx_t const root_id = (slice_ids == NULL) ? slice : slice_ids[slice];
    val_t const * const restrict rootrow = rootmat + (root_id * nfactors);

    /* foreach fiber in this slice */
    for(idx_t fib = slice_ptr[slice]; fib < slice_ptr[slice+1]; ++fib) {
      /* seed the accumulation with the fiber's first nonzero */
      idx_t const nz0 = fib_ptr[fib];
      val_t const * const restrict leafrow0 = leafmat + (nnz_ids[nz0] * nfactors);
      for(idx_t r = 0; r < nfactors; ++r) {
        accum[r] = tvals[nz0] * leafrow0[r];
      }

      /* fold the remaining nonzeros of the fiber into accum */
      for(idx_t nz = nz0 + 1; nz < fib_ptr[fib+1]; ++nz) {
        val_t const * const restrict leafrow = leafmat + (nnz_ids[nz] * nfactors);
        for(idx_t r = 0; r < nfactors; ++r) {
          accum[r] += tvals[nz] * leafrow[r];
        }
      }

      /* scale by the root row and add into the fiber's output row; no
       * locking -- the scheduler guarantees exclusive access */
      val_t * const restrict outrow = outmat + (fib_ids[fib] * nfactors);
      for(idx_t r = 0; r < nfactors; ++r) {
        outrow[r] += rootrow[r] * accum[r];
      }
    }
  }
}
/* MTTKRP for CSF tensors with nmodes > 3 where the output mode is an
 * internal level of the tree (depth "outdepth"). For each node at that
 * level, the result is the Hadamard product of the values propagated down
 * from the root (buf[outdepth-1]) and up from the subtree below
 * (buf[outdepth]). "Nolock" variant: output rows are written without
 * synchronization (the tile scheduler must guarantee exclusive access). */
static void p_csf_mttkrp_intl_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  /* use the specialized 3-mode kernel when possible */
  if(nmodes == 3) {
    p_csf_mttkrp_intl3_nolock(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* find out which level in the tree this is */
  idx_t const outdepth = csf_mode_to_depth(ct, mode);

  /* per-depth factor rows, Hadamard buffers, and the DFS position stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  /* foreach outer slice (restricted to this thread's partition) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    /* push outer slice and fill stack */
    idxstack[0] = s;
    for(idx_t m=1; m <= outdepth; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* fill first buf with the root factor row */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buf[0][f] = rootrow[f];
    }

    /* process entire subtree */
    idx_t depth = 0;
    while(idxstack[1] < fp[0][s+1]) {
      /* propagate values down to outdepth-1 */
      for(; depth < outdepth; ++depth) {
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* write to output and clear buf[outdepth] for next subtree */
      idx_t const noderow = fids[outdepth][idxstack[outdepth]];

      /* propagate value up to buf[outdepth] */
      p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth],
          fp, fids, vals, mvals, nmodes, nfactors);

      /* no lock: output row is combined down*up Hadamard, added in place */
      val_t * const restrict outbuf = ovals + (noderow * nfactors);
      p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors);

      /* backtrack to next unfinished node */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end foreach outer slice */
}
/* MTTKRP for CSF tensors with nmodes > 3 where the output mode is an
 * internal level of the tree. Identical traversal to
 * p_csf_mttkrp_intl_nolock, but the output-row update is guarded with a
 * mutex because concurrent threads may write the same rows. */
static void p_csf_mttkrp_intl_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  /* use the specialized 3-mode kernel when possible */
  if(nmodes == 3) {
    p_csf_mttkrp_intl3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* find out which level in the tree this is */
  idx_t const outdepth = csf_mode_to_depth(ct, mode);

  /* per-depth factor rows, Hadamard buffers, and the DFS position stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  /* foreach outer slice (restricted to this thread's partition) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    /* push outer slice and fill stack */
    idxstack[0] = s;
    for(idx_t m=1; m <= outdepth; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* fill first buf with the root factor row */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buf[0][f] = rootrow[f];
    }

    /* process entire subtree */
    idx_t depth = 0;
    while(idxstack[1] < fp[0][s+1]) {
      /* propagate values down to outdepth-1 */
      for(; depth < outdepth; ++depth) {
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* write to output and clear buf[outdepth] for next subtree */
      idx_t const noderow = fids[outdepth][idxstack[outdepth]];

      /* propagate value up to buf[outdepth] */
      p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth],
          fp, fids, vals, mvals, nmodes, nfactors);

      /* locked output update: combined down*up Hadamard, added in place */
      val_t * const restrict outbuf = ovals + (noderow * nfactors);
      mutex_set_lock(pool, noderow);
      p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors);
      mutex_unset_lock(pool, noderow);

      /* backtrack to next unfinished node */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end foreach outer slice */
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* Top-level CSF MTTKRP driver. Clears the output matrix, picks the CSF
 * representation mapped to this mode by the workspace, and dispatches to
 * the root/internal/leaf kernel pair depending on where the output mode
 * sits in that CSF's mode ordering. Results are written to
 * mats[MAX_NMODES]. */
void mttkrp_csf(
  splatt_csf const * const tensors,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  splatt_mttkrp_ws * const ws,
  double const * const opts)
{
  /* ensure we use as many threads as our partitioning supports */
  splatt_omp_set_num_threads(ws->num_threads);

  /* lazily allocate the global mutex pool on first use */
  if(pool == NULL) {
    pool = mutex_alloc();
  }

  /* clear output matrix */
  matrix_t * const M = mats[MAX_NMODES];
  M->I = tensors[0].dims[mode];
  memset(M->vals, 0, M->I * M->J * sizeof(val_t));

  idx_t const nmodes = tensors[0].nmodes;

  /* reset thread times */
  thd_reset(thds, splatt_omp_get_max_threads());

  /* choose which MTTKRP function to use based on the output mode's depth
   * in the selected CSF tree */
  idx_t const which_csf = ws->mode_csf_map[mode];
  idx_t const outdepth = csf_mode_to_depth(&(tensors[which_csf]), mode);
  if(outdepth == 0) {
    /* root */
    p_schedule_tiles(tensors, which_csf,
        p_csf_mttkrp_root_locked, p_csf_mttkrp_root_nolock,
        mats, mode, thds, ws);
  } else if(outdepth == nmodes - 1) {
    /* leaf */
    p_schedule_tiles(tensors, which_csf,
        p_csf_mttkrp_leaf_locked, p_csf_mttkrp_leaf_nolock,
        mats, mode, thds, ws);
  } else {
    /* internal */
    p_schedule_tiles(tensors, which_csf,
        p_csf_mttkrp_intl_locked, p_csf_mttkrp_intl_nolock,
        mats, mode, thds, ws);
  }

  /* print thread times, if requested */
  if((int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
    printf("MTTKRP mode %"SPLATT_PF_IDX": ", mode+1);
    thd_time_stats(thds, splatt_omp_get_max_threads());
    if(ws->is_privatized[mode]) {
      printf("  reduction-time: %0.3fs\n", ws->reduction_time);
    }
  }
  thd_reset(thds, splatt_omp_get_max_threads());
}
/******************************************************************************
* DEPRECATED FUNCTIONS
*****************************************************************************/
/******************************************************************************
* SPLATT MTTKRP
*****************************************************************************/
/* MTTKRP on a 3-mode fiber tensor (ftensor_t): M = X_(mode) (B khatri-rao A).
 * Dispatches to the tiled variants when the tensor was built with sync or
 * coop tiling; otherwise slices are distributed dynamically over threads.
 * Each slice maps to exactly one output row, so no locking is needed. */
void mttkrp_splatt(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  if(ft->tiled == SPLATT_SYNCTILE) {
    mttkrp_splatt_sync_tiled(ft, mats, mode, thds, nthreads);
    return;
  }
  if(ft->tiled == SPLATT_COOPTILE) {
    mttkrp_splatt_coop_tiled(ft, mats, mode, thds, nthreads);
    return;
  }

  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];
  idx_t const nslices = ft->dims[mode];
  idx_t const rank = M->J;

  /* zero the output before accumulating */
  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));

  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;

  idx_t const * const restrict sptr = ft->sptr;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    /* per-thread rank-length accumulation buffer */
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    timer_start(&thds[tid].ttime);

    #pragma omp for schedule(dynamic, 16) nowait
    for(idx_t s=0; s < nslices; ++s) {
      /* slice s owns output row s exclusively */
      val_t * const restrict mv = mvals + (s * rank);

      /* foreach fiber in slice */
      for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
        /* first entry of the fiber is used to initialize accumF */
        idx_t const jjfirst = fptr[f];
        val_t const vfirst = vals[jjfirst];
        val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
        for(idx_t r=0; r < rank; ++r) {
          accumF[r] = vfirst * bv[r];
        }

        /* foreach nnz in fiber */
        for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
          val_t const v = vals[jj];
          val_t const * const restrict bv = bvals + (inds[jj] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] += v * bv[r];
          }
        }

        /* scale inner products by row of A and update to M */
        val_t const * const restrict av = avals + (fids[f] * rank);
        for(idx_t r=0; r < rank; ++r) {
          mv[r] += accumF[r] * av[r];
        }
      }
    }
    timer_stop(&thds[tid].ttime);
  } /* end parallel region */
}
/* MTTKRP on a 3-mode fiber tensor that was built with synchronized tiling.
 * Threads take whole slabs dynamically; within a slab, each fiber carries
 * its own output-row id in sids[f] (instead of being grouped by slice).
 * NOTE(review): no locking is done on mv; the slab construction presumably
 * guarantees that concurrently-processed slabs touch disjoint output rows
 * -- confirm against the tiling code. */
void mttkrp_splatt_sync_tiled(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];

  idx_t const nslabs = ft->nslabs;
  idx_t const rank = M->J;

  /* zero the output before accumulating */
  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));

  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;

  idx_t const * const restrict slabptr = ft->slabptr;
  idx_t const * const restrict sids = ft->sids;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    /* per-thread rank-length accumulation buffer */
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    timer_start(&thds[tid].ttime);

    #pragma omp for schedule(dynamic, 1) nowait
    for(idx_t s=0; s < nslabs; ++s) {
      /* foreach fiber in slice */
      for(idx_t f=slabptr[s]; f < slabptr[s+1]; ++f) {
        /* first entry of the fiber is used to initialize accumF */
        idx_t const jjfirst = fptr[f];
        val_t const vfirst = vals[jjfirst];
        val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
        for(idx_t r=0; r < rank; ++r) {
          accumF[r] = vfirst * bv[r];
        }

        /* foreach nnz in fiber */
        for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
          val_t const v = vals[jj];
          val_t const * const restrict bv = bvals + (inds[jj] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] += v * bv[r];
          }
        }

        /* scale inner products by row of A and update to M; sids[f] gives
         * this fiber's output row */
        val_t * const restrict mv = mvals + (sids[f] * rank);
        val_t const * const restrict av = avals + (fids[f] * rank);
        for(idx_t r=0; r < rank; ++r) {
          mv[r] += accumF[r] * av[r];
        }
      }
    }
    timer_stop(&thds[tid].ttime);
  } /* end parallel region */
}
/* MTTKRP on a 3-mode fiber tensor built with cooperative tiling. Slabs are
 * processed one at a time by all threads together: fibers within a slab are
 * shared out with an `omp for`, each thread accumulating into its private
 * scratch matrix (scratch[1], TILE_SIZES[0] rows), and then a second
 * `omp for` reduces all threads' local rows into the global output. The
 * implicit barriers at the end of each worksharing loop separate the
 * accumulate and reduce phases. */
void mttkrp_splatt_coop_tiled(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];

  idx_t const nslabs = ft->nslabs;
  idx_t const rank = M->J;

  /* zero the output before accumulating */
  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));

  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;

  idx_t const * const restrict slabptr = ft->slabptr;
  idx_t const * const restrict sptr = ft->sptr;
  idx_t const * const restrict sids = ft->sids;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    /* per-thread rank-length accumulation buffer */
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    /* per-thread local copy of (a tile of) the output matrix */
    val_t * const localm = (val_t *) thds[tid].scratch[1];
    timer_start(&thds[tid].ttime);

    /* foreach slab */
    for(idx_t s=0; s < nslabs; ++s) {
      /* foreach fiber in slab -- fibers are shared among threads */
      #pragma omp for schedule(dynamic, 8)
      for(idx_t sl=slabptr[s]; sl < slabptr[s+1]; ++sl) {
        idx_t const slice = sids[sl];
        for(idx_t f=sptr[sl]; f < sptr[sl+1]; ++f) {
          /* first entry of the fiber is used to initialize accumF */
          idx_t const jjfirst = fptr[f];
          val_t const vfirst = vals[jjfirst];
          val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] = vfirst * bv[r];
          }

          /* foreach nnz in fiber */
          for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
            val_t const v = vals[jj];
            val_t const * const restrict bv = bvals + (inds[jj] * rank);
            for(idx_t r=0; r < rank; ++r) {
              accumF[r] += v * bv[r];
            }
          }

          /* scale inner products by row of A and update thread-local M;
           * the row is addressed relative to this slab's tile */
          val_t * const restrict mv = localm + ((slice % TILE_SIZES[0]) * rank);
          val_t const * const restrict av = avals + (fids[f] * rank);
          for(idx_t r=0; r < rank; ++r) {
            mv[r] += accumF[r] * av[r];
          }
        }
      }

      /* reduce: fold every thread's local rows for this slab into the
       * global output, clearing the local copies for the next slab */
      idx_t const start = s * TILE_SIZES[0];
      idx_t const stop = SS_MIN((s+1) * TILE_SIZES[0], ft->dims[mode]);

      #pragma omp for schedule(static)
      for(idx_t i=start; i < stop; ++i) {
        /* map i back to global slice id */
        idx_t const localrow = i % TILE_SIZES[0];
        for(idx_t t=0; t < nthreads; ++t) {
          val_t * const threadm = (val_t *) thds[t].scratch[1];
          for(idx_t r=0; r < rank; ++r) {
            mvals[r + (i*rank)] += threadm[r + (localrow*rank)];
            threadm[r + (localrow*rank)] = 0.;
          }
        }
      }
    } /* end foreach slab */
    timer_stop(&thds[tid].ttime);
  } /* end omp parallel */
}
/******************************************************************************
* GIGA MTTKRP
*****************************************************************************/
/* GigaTensor-style MTTKRP on a 3-mode tensor stored as an unfolded sparse
 * matrix (CSR). For each factor column r: scatter v * A[a][r] * B[b][r]
 * into scratch (one entry per nonzero), then reduce each row's scratch
 * range into M. The two `omp for` loops share the iteration space over
 * rows; the implicit barrier between them makes the scratch writes visible
 * before the reduction reads them. Columns of M/A/B are contiguous here
 * (column-major indexing r * I + i). */
void mttkrp_giga(
  spmatrix_t const * const spmat,
  matrix_t ** mats,
  idx_t const mode,
  val_t * const scratch)
{
  matrix_t * const M = mats[MAX_NMODES];
  /* the two non-output factors, in unfolding order */
  matrix_t const * const A = mode == 0 ? mats[1] : mats[0];
  matrix_t const * const B = mode == 2 ? mats[1] : mats[2];

  idx_t const I = spmat->I;
  idx_t const rank = M->J;

  idx_t const * const restrict rowptr = spmat->rowptr;
  idx_t const * const restrict colind = spmat->colind;
  val_t const * const restrict vals = spmat->vals;

  #pragma omp parallel
  {
    for(idx_t r=0; r < rank; ++r) {
      val_t * const restrict mv = M->vals + (r * I);
      val_t const * const restrict av = A->vals + (r * A->I);
      val_t const * const restrict bv = B->vals + (r * B->I);

      /* Joined Hadamard products of X, C, and B */
      #pragma omp for schedule(dynamic, 16)
      for(idx_t i=0; i < I; ++i) {
        for(idx_t y=rowptr[i]; y < rowptr[i+1]; ++y) {
          /* decode the combined column index into (a, b) coordinates */
          idx_t const a = colind[y] / B->I;
          idx_t const b = colind[y] % B->I;
          scratch[y] = vals[y] * av[a] * bv[b];
        }
      }

      /* now accumulate rows into column of M1 */
      #pragma omp for schedule(dynamic, 16)
      for(idx_t i=0; i < I; ++i) {
        val_t sum = 0;
        for(idx_t y=rowptr[i]; y < rowptr[i+1]; ++y) {
          sum += scratch[y];
        }
        mv[i] = sum;
      }
    }
  }
}
/******************************************************************************
* TTBOX MTTKRP
*****************************************************************************/
/* Tensor-Toolbox-style MTTKRP on a coordinate-format 3-mode tensor. For
 * each factor column r: compute v * A[indA][r] * B[indB][r] per nonzero
 * into scratch (parallel), then serially scatter-accumulate scratch into
 * M's column. The accumulation loop is intentionally serial: multiple
 * nonzeros can share an output index, so parallelizing it would race.
 * Columns are contiguous (column-major indexing r * I + i). */
void mttkrp_ttbox(
  sptensor_t const * const tt,
  matrix_t ** mats,
  idx_t const mode,
  val_t * const scratch)
{
  matrix_t * const M = mats[MAX_NMODES];
  /* the two non-output factors */
  matrix_t const * const A = mode == 0 ? mats[1] : mats[0];
  matrix_t const * const B = mode == 2 ? mats[1] : mats[2];

  idx_t const I = tt->dims[mode];
  idx_t const rank = M->J;

  memset(M->vals, 0, I * rank * sizeof(val_t));

  idx_t const nnz = tt->nnz;
  idx_t const * const restrict indM = tt->ind[mode];
  idx_t const * const restrict indA =
      mode == 0 ? tt->ind[1] : tt->ind[0];
  idx_t const * const restrict indB =
      mode == 2 ? tt->ind[1] : tt->ind[2];

  val_t const * const restrict vals = tt->vals;

  for(idx_t r=0; r < rank; ++r) {
    val_t * const restrict mv = M->vals + (r * I);
    val_t const * const restrict av = A->vals + (r * A->I);
    val_t const * const restrict bv = B->vals + (r * B->I);

    /* stretch out columns of A and B */
    #pragma omp parallel for
    for(idx_t x=0; x < nnz; ++x) {
      scratch[x] = vals[x] * av[indA[x]] * bv[indB[x]];
    }

    /* now accumulate into m1 (serial: output indices may repeat) */
    for(idx_t x=0; x < nnz; ++x) {
      mv[indM[x]] += scratch[x];
    }
  }
}
/* MTTKRP that streams over the nonzeros of a coordinate-format tensor of
 * any order. Each nonzero's value is multiplied by the matching rows of
 * every factor except the output mode, then added to the output row under
 * a per-row mutex (nonzeros sharing an output index may be processed by
 * different threads). */
void mttkrp_stream(
  sptensor_t const * const tt,
  matrix_t ** mats,
  idx_t const mode)
{
  /* lazily allocate the global mutex pool on first use */
  if(pool == NULL) {
    pool = mutex_alloc();
  }

  matrix_t * const M = mats[MAX_NMODES];
  idx_t const I = tt->dims[mode];
  idx_t const nfactors = M->J;

  /* zero the output before accumulating */
  val_t * const outmat = M->vals;
  memset(outmat, 0, I * nfactors * sizeof(*outmat));

  idx_t const nmodes = tt->nmodes;

  val_t * mvals[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[m]->vals;
  }

  val_t const * const restrict vals = tt->vals;

  #pragma omp parallel
  {
    /* per-thread accumulation row */
    val_t * restrict accum = splatt_malloc(nfactors * sizeof(*accum));

    /* stream through nnz */
    #pragma omp for schedule(static)
    for(idx_t n=0; n < tt->nnz; ++n) {
      /* initialize with value */
      for(idx_t f=0; f < nfactors; ++f) {
        accum[f] = vals[n];
      }

      /* Hadamard with each factor row except the output mode's */
      for(idx_t m=0; m < nmodes; ++m) {
        if(m == mode) {
          continue;
        }
        val_t const * const restrict inrow = mvals[m] + \
            (tt->ind[m][n] * nfactors);
        for(idx_t f=0; f < nfactors; ++f) {
          accum[f] *= inrow[f];
        }
      }

      /* write to output under a per-row lock */
      idx_t const out_ind = tt->ind[mode][n];
      val_t * const restrict outrow = outmat + (tt->ind[mode][n] * nfactors);
      mutex_set_lock(pool, out_ind);
      for(idx_t f=0; f < nfactors; ++f) {
        outrow[f] += accum[f];
      }
      mutex_unset_lock(pool, out_ind);
    }

    splatt_free(accum);
  } /* end omp parallel */
}
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
/* Public API entry point for MTTKRP. Wraps the caller's raw value arrays
 * in matrix_t headers, allocates thread scratch and a workspace, runs
 * mttkrp_csf() for the requested mode, and frees everything it allocated.
 * The result is written to matout (row-major, dims[mode] x ncolumns).
 * Returns SPLATT_SUCCESS.
 *
 * Fixes vs. previous version: `mats[m]->J = ncolumns,` used a stray comma
 * operator instead of a statement terminator (harmless but clearly a
 * typo), and the matrix_t headers allocated with splatt_malloc() were
 * released with plain free() -- they are now released with splatt_free()
 * for allocator consistency with the rest of the file. */
int splatt_mttkrp(
  splatt_idx_t const mode,
  splatt_idx_t const ncolumns,
  splatt_csf const * const tensors,
  splatt_val_t ** matrices,
  splatt_val_t * const matout,
  double const * const options)
{
  idx_t const nmodes = tensors->nmodes;

  /* fill matrix pointers -- headers only; vals alias the caller's data */
  matrix_t * mats[MAX_NMODES+1];
  for(idx_t m=0; m < nmodes; ++m) {
    mats[m] = (matrix_t *) splatt_malloc(sizeof(matrix_t));
    mats[m]->I = tensors->dims[m];
    mats[m]->J = ncolumns;
    mats[m]->rowmajor = 1;
    mats[m]->vals = matrices[m];
  }
  mats[MAX_NMODES] = (matrix_t *) splatt_malloc(sizeof(matrix_t));
  mats[MAX_NMODES]->I = tensors->dims[mode];
  mats[MAX_NMODES]->J = ncolumns;
  mats[MAX_NMODES]->rowmajor = 1;
  mats[MAX_NMODES]->vals = matout;

  /* Setup thread structures. + 64 bytes is to avoid false sharing. */
  idx_t const nthreads = (idx_t) options[SPLATT_OPTION_NTHREADS];
  splatt_omp_set_num_threads(nthreads);
  thd_info * thds = thd_init(nthreads, 3,
      (nmodes * ncolumns * sizeof(val_t)) + 64,
      0,
      (nmodes * ncolumns * sizeof(val_t)) + 64);

  splatt_mttkrp_ws * ws = splatt_mttkrp_alloc_ws(tensors, ncolumns, options);

  /* do the MTTKRP */
  mttkrp_csf(tensors, mats, mode, thds, ws, options);

  splatt_mttkrp_free_ws(ws);

  /* cleanup -- only the headers are freed, never the caller's arrays */
  thd_free(thds, nthreads);
  for(idx_t m=0; m < nmodes; ++m) {
    splatt_free(mats[m]);
  }
  splatt_free(mats[MAX_NMODES]);

  return SPLATT_SUCCESS;
}
/*
 * Allocate and initialize an MTTKRP workspace: map each mode to a CSF
 * tensor according to the chosen allocation scheme, build per-CSF thread
 * partitions, and allocate per-thread privatization buffers for modes
 * that will be privatized.
 *
 * @param tensors  The CSF tensor(s); how many are valid depends on
 *                 SPLATT_OPTION_CSF_ALLOC.
 * @param ncolumns Factor matrix rank (columns).
 * @param opts     SPLATT options array.
 * @return Newly allocated workspace; release with splatt_mttkrp_free_ws().
 */
splatt_mttkrp_ws * splatt_mttkrp_alloc_ws(
    splatt_csf const * const tensors,
    splatt_idx_t const ncolumns,
    double const * const opts)
{
  splatt_mttkrp_ws * ws = splatt_malloc(sizeof(*ws));

  idx_t num_csf = 0;
  /* without OpenMP everything runs on a single thread */
#ifdef _OPENMP
  idx_t const num_threads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
#else
  idx_t const num_threads = 1;
#endif
  ws->num_threads = num_threads;

  /* map each MTTKRP mode to a CSF tensor */
  splatt_csf_type which_csf = (splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC];
  for(idx_t m=0; m < tensors->nmodes; ++m) {
    switch(which_csf) {
    case SPLATT_CSF_ONEMODE:
      /* only one tensor, map is easy */
      ws->mode_csf_map[m] = 0;
      num_csf = 1;
      break;
    case SPLATT_CSF_TWOMODE:
      /* last mode is mapped to second tensor */
      ws->mode_csf_map[m] = 0;
      if(csf_mode_to_depth(&(tensors[0]), m) == tensors->nmodes-1) {
        ws->mode_csf_map[m] = 1;
      }
      num_csf = 2;
      break;
    case SPLATT_CSF_ALLMODE:
      /* each mode has its own tensor, map is easy */
      ws->mode_csf_map[m] = m;
      num_csf = tensors->nmodes;
      break;
    /* XXX */
    default:
      /* unknown allocation scheme: fail loudly rather than continue with
       * an uninitialized mapping */
      fprintf(stderr, "SPLATT: CSF type '%d' not recognized.\n", which_csf);
      abort();
      break;
    }
  }

  assert(num_csf > 0);
  ws->num_csf = num_csf;

  /* Now setup partition info for each CSF. Initialize both pointers so the
   * unused one of each pair stays NULL. */
  for(idx_t c=0; c < num_csf; ++c) {
    ws->tile_partition[c] = NULL;
    ws->tree_partition[c] = NULL;
  }
  for(idx_t c=0; c < num_csf; ++c) {
    splatt_csf const * const csf = &(tensors[c]);
    /* tiled tensors are partitioned over tiles; otherwise over trees */
    if(tensors[c].ntiles > 1) {
      ws->tile_partition[c] = csf_partition_tiles_1d(csf, num_threads);
    } else {
      ws->tree_partition[c] = csf_partition_1d(csf, 0, num_threads);
    }
  }

  /* allocate privatization buffer, sized for the largest privatized mode */
  idx_t largest_priv_dim = 0;
  ws->privatize_buffer =
      splatt_malloc(num_threads * sizeof(*(ws->privatize_buffer)));
  for(idx_t m=0; m < tensors->nmodes; ++m) {
    ws->is_privatized[m] = p_is_privatized(tensors, m, opts);

    if(ws->is_privatized[m]) {
      largest_priv_dim = SS_MAX(largest_priv_dim, tensors->dims[m]);
      if((int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
        printf("PRIVATIZING-MODE: %"SPLATT_PF_IDX"\n", m+1);
      }
    }
  }
  /* one buffer per thread (may be a zero-byte allocation when no mode is
   * privatized) */
  for(idx_t t=0; t < num_threads; ++t) {
    ws->privatize_buffer[t] = splatt_malloc(largest_priv_dim * ncolumns *
        sizeof(**(ws->privatize_buffer)));
  }
  if(largest_priv_dim > 0 &&
      (int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
    size_t bytes = num_threads * largest_priv_dim * ncolumns *
        sizeof(**(ws->privatize_buffer));
    char * bstr = bytes_str(bytes);
    printf("PRIVATIZATION-BUF: %s\n", bstr);
    printf("\n");
    free(bstr);
  }

  return ws;
}
/*
 * Release all memory owned by an MTTKRP workspace: the per-thread
 * privatization buffers, the per-CSF partition arrays, and finally the
 * workspace structure itself.
 */
void splatt_mttkrp_free_ws(
    splatt_mttkrp_ws * const ws)
{
  idx_t const nthreads = ws->num_threads;
  idx_t const ncsf = ws->num_csf;

  /* per-thread buffers first, then the pointer array itself */
  for(idx_t tid=0; tid < nthreads; ++tid) {
    splatt_free(ws->privatize_buffer[tid]);
  }
  splatt_free(ws->privatize_buffer);

  /* partition arrays (one of each pair is typically NULL) */
  for(idx_t csf=0; csf < ncsf; ++csf) {
    splatt_free(ws->tile_partition[csf]);
    splatt_free(ws->tree_partition[csf]);
  }

  splatt_free(ws);
}
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <atomic>
#include <cmath>
#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Logging.h"
#include "open3d/utility/Timer.h"
namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {
// Fuse one depth frame (and optionally a color frame) into the TSDF voxel
// block grid. Every voxel of every candidate block listed in `indices` is
// transformed into the camera frame, projected into the depth image, and its
// TSDF/weight (and color, when provided) are updated in place in
// `block_values`. Compiled as IntegrateCUDA under NVCC and IntegrateCPU
// otherwise; the body is shared.
#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
        (const core::Tensor& depth,
         const core::Tensor& color,
         const core::Tensor& indices,
         const core::Tensor& block_keys,
         core::Tensor& block_values,
         // Transforms
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         // Parameters
         int64_t resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_max) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);

    // Real data indexer
    NDArrayIndexer depth_indexer(depth, 2);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);

    // Optional color integration: enabled only when a non-empty color
    // tensor is passed in.
    NDArrayIndexer color_indexer;
    bool integrate_color = false;
    if (color.NumElements() != 0) {
        color_indexer = NDArrayIndexer(color, 2);
        integrate_color = true;
    }

    // Plain arrays that does not require indexers
    const int* indices_ptr = indices.GetDataPtr<int>();

    // One work item per voxel across all candidate blocks.
    int64_t n = indices.GetLength() * resolution3;

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                core::ParallelFor(
                        depth.GetDevice(), n,
                        [=] OPEN3D_DEVICE(int64_t workload_idx) {
                            // Natural index (0, N) -> (block_idx, voxel_idx)
                            int block_idx =
                                    indices_ptr[workload_idx / resolution3];
                            int voxel_idx = workload_idx % resolution3;

                            /// Coordinate transform
                            // block_idx -> (x_block, y_block, z_block);
                            // each key stores 3 int block coordinates.
                            int* block_key_ptr =
                                    block_keys_indexer.GetDataPtr<int>(
                                            block_idx);
                            int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                            int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                            int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                            int64_t xv, yv, zv;
                            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv,
                                                          &zv);

                            // coordinate in world (in voxel)
                            int64_t x = (xb * resolution + xv);
                            int64_t y = (yb * resolution + yv);
                            int64_t z = (zb * resolution + zv);

                            // coordinate in camera (in voxel -> in meter)
                            float xc, yc, zc, u, v;
                            transform_indexer.RigidTransform(
                                    static_cast<float>(x),
                                    static_cast<float>(y),
                                    static_cast<float>(z), &xc, &yc, &zc);

                            // coordinate in image (in pixel)
                            transform_indexer.Project(xc, yc, zc, &u, &v);
                            if (!depth_indexer.InBoundary(u, v)) {
                                return;
                            }

                            // Associate image workload and compute SDF and
                            // TSDF. NOTE: local `depth` shadows the captured
                            // depth tensor inside this lambda.
                            float depth = *depth_indexer.GetDataPtr<float>(
                                                  static_cast<int64_t>(u),
                                                  static_cast<int64_t>(v)) /
                                          depth_scale;

                            float sdf = (depth - zc);
                            // Skip invalid depth, far surfaces, voxels behind
                            // the camera, and voxels beyond the truncation
                            // band behind the surface.
                            if (depth <= 0 || depth > depth_max || zc <= 0 ||
                                sdf < -sdf_trunc) {
                                return;
                            }
                            // Truncate and normalize into [-1, 1].
                            sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
                            sdf /= sdf_trunc;

                            // Associate voxel workload and update TSDF/Weights
                            voxel_t* voxel_ptr =
                                    voxel_block_buffer_indexer
                                            .GetDataPtr<voxel_t>(xv, yv, zv,
                                                                 block_idx);

                            if (integrate_color) {
                                float* color_ptr =
                                        color_indexer.GetDataPtr<float>(
                                                static_cast<int64_t>(u),
                                                static_cast<int64_t>(v));

                                voxel_ptr->Integrate(sdf, color_ptr[0],
                                                     color_ptr[1],
                                                     color_ptr[2]);
                            } else {
                                voxel_ptr->Integrate(sdf);
                            }
                        });
            });
#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
// Extract surface points (optionally with normals and colors) from the TSDF
// voxel block grid by finding zero crossings of the TSDF along the +x/+y/+z
// edges of each voxel. When `valid_size` is negative, an extra counting pass
// first estimates the number of points. Compiled as
// ExtractSurfacePointsCUDA under NVCC and ExtractSurfacePointsCPU otherwise.
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& points,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& valid_size) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that does not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();

    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;

    // Output point counter: a device tensor on CUDA, a std::atomic on CPU.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

    if (valid_size < 0) {
        utility::LogWarning(
                "No estimated max point cloud size provided, using a 2-pass "
                "estimation. Surface extraction could be slow.");
        // This pass determines valid number of points.
        DISPATCH_BYTESIZE_TO_VOXEL(
                voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                    core::ParallelFor(
                            indices.GetDevice(), n,
                            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                                // Look up a neighboring voxel, possibly in an
                                // adjacent block, via the neighbor tables.
                                auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                                          int xo, int yo,
                                                          int zo,
                                                          int curr_block_idx)
                                        -> voxel_t* {
                                    return DeviceGetVoxelAt<voxel_t>(
                                            xo, yo, zo, curr_block_idx,
                                            static_cast<int>(resolution),
                                            nb_block_masks_indexer,
                                            nb_block_indices_indexer,
                                            voxel_block_buffer_indexer);
                                };

                                // Natural index (0, N) -> (block_idx,
                                // voxel_idx)
                                int64_t workload_block_idx =
                                        workload_idx / resolution3;
                                int64_t block_idx =
                                        indices_ptr[workload_block_idx];
                                int64_t voxel_idx = workload_idx % resolution3;

                                // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                                int64_t xv, yv, zv;
                                voxel_indexer.WorkloadToCoord(voxel_idx, &xv,
                                                              &yv, &zv);

                                voxel_t* voxel_ptr =
                                        voxel_block_buffer_indexer
                                                .GetDataPtr<voxel_t>(xv, yv, zv,
                                                                     block_idx);
                                float tsdf_o = voxel_ptr->GetTSDF();
                                float weight_o = voxel_ptr->GetWeight();
                                if (weight_o <= weight_threshold) return;

                                // Enumerate x-y-z directions; count one point
                                // per sign change of the TSDF.
                                for (int i = 0; i < 3; ++i) {
                                    voxel_t* ptr = GetVoxelAt(
                                            static_cast<int>(xv) + (i == 0),
                                            static_cast<int>(yv) + (i == 1),
                                            static_cast<int>(zv) + (i == 2),
                                            static_cast<int>(
                                                    workload_block_idx));
                                    if (ptr == nullptr) continue;

                                    float tsdf_i = ptr->GetTSDF();
                                    float weight_i = ptr->GetWeight();
                                    if (weight_i > weight_threshold &&
                                        tsdf_i * tsdf_o < 0) {
                                        OPEN3D_ATOMIC_ADD(count_ptr, 1);
                                    }
                                }
                            });
                });

        // Store the estimate and reset the counter for the extraction pass.
#if defined(__CUDACC__)
        valid_size = count[0].Item<int>();
        count[0] = 0;
#else
        valid_size = (*count_ptr).load();
        (*count_ptr) = 0;
#endif
    }

    int max_count = valid_size;
    // Allocate outputs only if the caller did not pre-allocate them.
    if (points.GetLength() == 0) {
        points = core::Tensor({max_count, 3}, core::Float32,
                              block_values.GetDevice());
    }
    NDArrayIndexer point_indexer(points, 1);

    // Normals
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        if (normals.value().get().GetLength() == 0) {
            normals.value().get() = core::Tensor({max_count, 3}, core::Float32,
                                                 block_values.GetDevice());
        }
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    // This pass extracts exact surface points.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                // Colors (only meaningful for voxel types that carry color)
                bool extract_color = false;
                NDArrayIndexer color_indexer;
                if (voxel_t::HasColor() && colors.has_value()) {
                    extract_color = true;
                    if (colors.value().get().GetLength() == 0) {
                        colors.value().get() =
                                core::Tensor({max_count, 3}, core::Float32,
                                             block_values.GetDevice());
                    }
                    color_indexer = NDArrayIndexer(colors.value().get(), 1);
                }

                core::ParallelFor(
                        indices.GetDevice(), n,
                        [=] OPEN3D_DEVICE(int64_t workload_idx) {
                            auto GetVoxelAt =
                                    [&] OPEN3D_DEVICE(
                                            int xo, int yo, int zo,
                                            int curr_block_idx) -> voxel_t* {
                                return DeviceGetVoxelAt<voxel_t>(
                                        xo, yo, zo, curr_block_idx,
                                        static_cast<int>(resolution),
                                        nb_block_masks_indexer,
                                        nb_block_indices_indexer,
                                        voxel_block_buffer_indexer);
                            };
                            // Gradient-based normal at a (possibly
                            // neighboring) voxel, written into n[3].
                            auto GetNormalAt = [&] OPEN3D_DEVICE(
                                                       int xo, int yo, int zo,
                                                       int curr_block_idx,
                                                       float* n) {
                                return DeviceGetNormalAt<voxel_t>(
                                        xo, yo, zo, curr_block_idx, n,
                                        static_cast<int>(resolution),
                                        voxel_size, nb_block_masks_indexer,
                                        nb_block_indices_indexer,
                                        voxel_block_buffer_indexer);
                            };

                            // Natural index (0, N) -> (block_idx, voxel_idx)
                            int64_t workload_block_idx =
                                    workload_idx / resolution3;
                            int64_t block_idx = indices_ptr[workload_block_idx];
                            int64_t voxel_idx = workload_idx % resolution3;

                            /// Coordinate transform
                            // block_idx -> (x_block, y_block, z_block)
                            int* block_key_ptr =
                                    block_keys_indexer.GetDataPtr<int>(
                                            block_idx);
                            int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                            int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                            int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                            int64_t xv, yv, zv;
                            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv,
                                                          &zv);

                            voxel_t* voxel_ptr =
                                    voxel_block_buffer_indexer
                                            .GetDataPtr<voxel_t>(xv, yv, zv,
                                                                 block_idx);
                            float tsdf_o = voxel_ptr->GetTSDF();
                            float weight_o = voxel_ptr->GetWeight();

                            if (weight_o <= weight_threshold) return;

                            // global voxel coordinate
                            int64_t x = xb * resolution + xv;
                            int64_t y = yb * resolution + yv;
                            int64_t z = zb * resolution + zv;

                            float no[3] = {0}, ni[3] = {0};
                            if (extract_normal) {
                                GetNormalAt(
                                        static_cast<int>(xv),
                                        static_cast<int>(yv),
                                        static_cast<int>(zv),
                                        static_cast<int>(workload_block_idx),
                                        no);
                            }

                            // Enumerate x-y-z axis: one candidate point per
                            // zero crossing along each positive direction.
                            for (int i = 0; i < 3; ++i) {
                                voxel_t* ptr = GetVoxelAt(
                                        static_cast<int>(xv) + (i == 0),
                                        static_cast<int>(yv) + (i == 1),
                                        static_cast<int>(zv) + (i == 2),
                                        static_cast<int>(workload_block_idx));
                                if (ptr == nullptr) continue;

                                float tsdf_i = ptr->GetTSDF();
                                float weight_i = ptr->GetWeight();

                                if (weight_i > weight_threshold &&
                                    tsdf_i * tsdf_o < 0) {
                                    // Linear interpolation factor along the
                                    // edge where TSDF crosses zero.
                                    float ratio =
                                            (0 - tsdf_o) / (tsdf_i - tsdf_o);

                                    int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                                    if (idx >= valid_size) {
                                        printf("Point cloud size larger than "
                                               "estimated, please increase the "
                                               "estimation!\n");
                                        return;
                                    }

                                    float* point_ptr =
                                            point_indexer.GetDataPtr<float>(
                                                    idx);
                                    point_ptr[0] = voxel_size *
                                                   (x + ratio * int(i == 0));
                                    point_ptr[1] = voxel_size *
                                                   (y + ratio * int(i == 1));
                                    point_ptr[2] = voxel_size *
                                                   (z + ratio * int(i == 2));

                                    if (extract_color) {
                                        float* color_ptr =
                                                color_indexer.GetDataPtr<float>(
                                                        idx);

                                        // Interpolate colors across the edge;
                                        // stored colors appear to be in
                                        // [0, 255] and are scaled to [0, 1].
                                        float r_o = voxel_ptr->GetR();
                                        float g_o = voxel_ptr->GetG();
                                        float b_o = voxel_ptr->GetB();

                                        float r_i = ptr->GetR();
                                        float g_i = ptr->GetG();
                                        float b_i = ptr->GetB();

                                        color_ptr[0] = ((1 - ratio) * r_o +
                                                        ratio * r_i) /
                                                       255.0f;
                                        color_ptr[1] = ((1 - ratio) * g_o +
                                                        ratio * g_i) /
                                                       255.0f;
                                        color_ptr[2] = ((1 - ratio) * b_o +
                                                        ratio * b_i) /
                                                       255.0f;
                                    }

                                    if (extract_normal) {
                                        GetNormalAt(
                                                static_cast<int>(xv) + (i == 0),
                                                static_cast<int>(yv) + (i == 1),
                                                static_cast<int>(zv) + (i == 2),
                                                static_cast<int>(
                                                        workload_block_idx),
                                                ni);

                                        // Interpolate and renormalize (1e-5
                                        // guards against division by zero).
                                        float* normal_ptr =
                                                normal_indexer
                                                        .GetDataPtr<float>(idx);
                                        float nx = (1 - ratio) * no[0] +
                                                   ratio * ni[0];
                                        float ny = (1 - ratio) * no[1] +
                                                   ratio * ni[1];
                                        float nz = (1 - ratio) * no[2] +
                                                   ratio * ni[2];
                                        float norm = static_cast<float>(
                                                sqrt(nx * nx + ny * ny +
                                                     nz * nz) +
                                                1e-5);
                                        normal_ptr[0] = nx / norm;
                                        normal_ptr[1] = ny / norm;
                                        normal_ptr[2] = nz / norm;
                                    }
                                }
                            }
                        });
            });

#if defined(__CUDACC__)
    int total_count = count.Item<int>();
#else
    int total_count = (*count_ptr).load();
#endif

    utility::LogDebug("{} vertices extracted", total_count);
    valid_size = total_count;

#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
// Extract a triangle mesh from the TSDF voxel block grid via Marching Cubes,
// run in four passes:
//   Pass 0: per-voxel cube type + mark edges that will host a vertex (-1).
//   Pass 1: count vertices (only when the caller gave no estimate).
//   Pass 2: compute vertex positions (and optional normals/colors).
//   Pass 3: assemble triangles from the per-edge vertex indices.
// Compiled as ExtractSurfaceMeshCUDA under NVCC and ExtractSurfaceMeshCPU
// otherwise.
#if defined(__CUDACC__)
void ExtractSurfaceMeshCUDA
#else
void ExtractSurfaceMeshCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& inv_indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& vertices,
         core::Tensor& triangles,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& vertex_count) {
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    int n_blocks = static_cast<int>(indices.GetLength());

    // TODO(wei): profile performance by replacing the table to a hashmap.
    // Voxel-wise mesh info. 4 channels correspond to:
    // 3 edges' corresponding vertex index + 1 table index.
    core::Tensor mesh_structure;
    try {
        mesh_structure = core::Tensor::Zeros(
                {n_blocks, resolution, resolution, resolution, 4}, core::Int32,
                block_keys.GetDevice());
    } catch (const std::runtime_error&) {
        utility::LogError(
                "[MeshExtractionKernel] Unable to allocate assistance mesh "
                "structure for Marching "
                "Cubes with {} active voxel blocks. Please consider using a "
                "larger voxel size (currently {}) for TSDF "
                "integration, or using tsdf_volume.cpu() to perform mesh "
                "extraction on CPU.",
                n_blocks, voxel_size);
    }

    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer mesh_structure_indexer(mesh_structure, 4);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that does not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
    const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>();

    int64_t n = n_blocks * resolution3;
    int64_t voxel_bytesize = voxel_block_buffer_indexer.ElementByteSize();

    // Pass 0: analyze mesh structure, set up one-on-one correspondences
    // from edges to vertices.
    DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
        core::ParallelFor(
                indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
                    // Look up a neighboring voxel, possibly in an adjacent
                    // block, via the neighbor tables.
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = widx / resolution3;
                    int64_t voxel_idx = widx % resolution3;

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // Check per-vertex sign in the cube to determine cube
                    // type: bit i is set when corner i is inside (TSDF < 0).
                    int table_idx = 0;
                    for (int i = 0; i < 8; ++i) {
                        voxel_t* voxel_ptr_i = GetVoxelAt(
                                static_cast<int>(xv) + vtx_shifts[i][0],
                                static_cast<int>(yv) + vtx_shifts[i][1],
                                static_cast<int>(zv) + vtx_shifts[i][2],
                                static_cast<int>(workload_block_idx));
                        if (voxel_ptr_i == nullptr) return;

                        float tsdf_i = voxel_ptr_i->GetTSDF();
                        float weight_i = voxel_ptr_i->GetWeight();
                        if (weight_i <= weight_threshold) return;

                        table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
                    }

                    int* mesh_struct_ptr =
                            mesh_structure_indexer.GetDataPtr<int>(
                                    xv, yv, zv, workload_block_idx);
                    mesh_struct_ptr[3] = table_idx;

                    // Fully outside or fully inside: no surface in this cube.
                    if (table_idx == 0 || table_idx == 255) return;

                    // Check per-edge sign determine the cube type
                    int edges_with_vertices = edge_table[table_idx];
                    for (int i = 0; i < 12; ++i) {
                        if (edges_with_vertices & (1 << i)) {
                            // Owner voxel of edge i, possibly in a neighbor
                            // block (dxb/dyb/dzb in {0, 1}).
                            int64_t xv_i = xv + edge_shifts[i][0];
                            int64_t yv_i = yv + edge_shifts[i][1];
                            int64_t zv_i = zv + edge_shifts[i][2];
                            int edge_i = edge_shifts[i][3];

                            int dxb = static_cast<int>(xv_i / resolution);
                            int dyb = static_cast<int>(yv_i / resolution);
                            int dzb = static_cast<int>(zv_i / resolution);

                            // Offset into the 3x3x3 neighbor table.
                            int nb_idx =
                                    (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;

                            int64_t block_idx_i =
                                    *nb_block_indices_indexer
                                             .GetDataPtr<int64_t>(
                                                     workload_block_idx,
                                                     nb_idx);
                            int* mesh_ptr_i =
                                    mesh_structure_indexer.GetDataPtr<int>(
                                            xv_i - dxb * resolution,
                                            yv_i - dyb * resolution,
                                            zv_i - dzb * resolution,
                                            inv_indices_ptr[block_idx_i]);

                            // Non-atomic write, but we are safe
                            mesh_ptr_i[edge_i] = -1;
                        }
                    }
                });
    });

    // Pass 1: determine valid number of vertices (if not preset)
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {}, core::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

    if (vertex_count < 0) {
        core::ParallelFor(
                indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = widx / resolution3;
                    int64_t voxel_idx = widx % resolution3;

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // Obtain voxel's mesh struct ptr
                    int* mesh_struct_ptr =
                            mesh_structure_indexer.GetDataPtr<int>(
                                    xv, yv, zv, workload_block_idx);

                    // Early quit -- no allocated vertex to compute
                    if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                        mesh_struct_ptr[2] != -1) {
                        return;
                    }

                    // Enumerate 3 edges in the voxel; each -1 marker from
                    // Pass 0 contributes one vertex.
                    for (int e = 0; e < 3; ++e) {
                        int vertex_idx = mesh_struct_ptr[e];
                        if (vertex_idx != -1) continue;

                        OPEN3D_ATOMIC_ADD(count_ptr, 1);
                    }
                });

#if defined(__CUDACC__)
        vertex_count = count.Item<int>();
#else
        vertex_count = (*count_ptr).load();
#endif
    }

    utility::LogDebug("Total vertex count = {}", vertex_count);
    vertices = core::Tensor({vertex_count, 3}, core::Float32,
                            block_values.GetDevice());

    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        normals.value().get() = core::Tensor({vertex_count, 3}, core::Float32,
                                             block_values.GetDevice());
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer vertex_indexer(vertices, 1);

    // Reset the counter before reusing it as the vertex index allocator.
#if defined(__CUDACC__)
    count = core::Tensor(std::vector<int>{0}, {}, core::Int32,
                         block_values.GetDevice());
    count_ptr = count.GetDataPtr<int>();
#else
    (*count_ptr) = 0;
#endif

    // Pass 2: extract vertices.
    DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
        bool extract_color = false;
        NDArrayIndexer color_indexer;
        if (voxel_t::HasColor() && colors.has_value()) {
            extract_color = true;
            colors.value().get() = core::Tensor(
                    {vertex_count, 3}, core::Float32, block_values.GetDevice());
            color_indexer = NDArrayIndexer(colors.value().get(), 1);
        }

        core::ParallelFor(
                indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };
                    // Gradient-based normal at a (possibly neighboring)
                    // voxel, written into n[3].
                    auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
                                                         int curr_block_idx,
                                                         float* n) {
                        return DeviceGetNormalAt<voxel_t>(
                                xo, yo, zo, curr_block_idx, n,
                                static_cast<int>(resolution), voxel_size,
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = widx / resolution3;
                    int64_t block_idx = indices_ptr[workload_block_idx];
                    int64_t voxel_idx = widx % resolution3;

                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtr<int>(block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // global coordinate (in voxels)
                    int64_t x = xb * resolution + xv;
                    int64_t y = yb * resolution + yv;
                    int64_t z = zb * resolution + zv;

                    // Obtain voxel's mesh struct ptr
                    int* mesh_struct_ptr =
                            mesh_structure_indexer.GetDataPtr<int>(
                                    xv, yv, zv, workload_block_idx);

                    // Early quit -- no allocated vertex to compute
                    if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                        mesh_struct_ptr[2] != -1) {
                        return;
                    }

                    // Obtain voxel ptr
                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                    xv, yv, zv, block_idx);
                    float tsdf_o = voxel_ptr->GetTSDF();
                    float no[3] = {0}, ne[3] = {0};
                    if (extract_normal) {
                        GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
                                    static_cast<int>(zv),
                                    static_cast<int>(workload_block_idx), no);
                    }

                    // Enumerate 3 edges in the voxel; allocate and compute a
                    // vertex for each edge marked -1 in Pass 0.
                    for (int e = 0; e < 3; ++e) {
                        int vertex_idx = mesh_struct_ptr[e];
                        if (vertex_idx != -1) continue;

                        voxel_t* voxel_ptr_e = GetVoxelAt(
                                static_cast<int>(xv) + (e == 0),
                                static_cast<int>(yv) + (e == 1),
                                static_cast<int>(zv) + (e == 2),
                                static_cast<int>(workload_block_idx));
                        OPEN3D_ASSERT(
                                voxel_ptr_e != nullptr &&
                                "Internal error: GetVoxelAt returns nullptr.");
                        float tsdf_e = voxel_ptr_e->GetTSDF();
                        // Linear interpolation factor where TSDF crosses zero.
                        float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o);

                        int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                        // Record the allocated vertex index for Pass 3.
                        mesh_struct_ptr[e] = idx;

                        float ratio_x = ratio * int(e == 0);
                        float ratio_y = ratio * int(e == 1);
                        float ratio_z = ratio * int(e == 2);

                        float* vertex_ptr =
                                vertex_indexer.GetDataPtr<float>(idx);
                        vertex_ptr[0] = voxel_size * (x + ratio_x);
                        vertex_ptr[1] = voxel_size * (y + ratio_y);
                        vertex_ptr[2] = voxel_size * (z + ratio_z);

                        if (extract_normal) {
                            float* normal_ptr =
                                    normal_indexer.GetDataPtr<float>(idx);
                            GetNormalAt(static_cast<int>(xv) + (e == 0),
                                        static_cast<int>(yv) + (e == 1),
                                        static_cast<int>(zv) + (e == 2),
                                        static_cast<int>(workload_block_idx),
                                        ne);
                            // Interpolate and renormalize (1e-5 guards
                            // against division by zero).
                            float nx = (1 - ratio) * no[0] + ratio * ne[0];
                            float ny = (1 - ratio) * no[1] + ratio * ne[1];
                            float nz = (1 - ratio) * no[2] + ratio * ne[2];
                            float norm = static_cast<float>(
                                    sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
                            normal_ptr[0] = nx / norm;
                            normal_ptr[1] = ny / norm;
                            normal_ptr[2] = nz / norm;
                        }

                        if (extract_color) {
                            // Interpolate colors across the edge; stored
                            // colors appear to be in [0, 255] and are scaled
                            // to [0, 1].
                            float* color_ptr =
                                    color_indexer.GetDataPtr<float>(idx);
                            float r_o = voxel_ptr->GetR();
                            float g_o = voxel_ptr->GetG();
                            float b_o = voxel_ptr->GetB();

                            float r_e = voxel_ptr_e->GetR();
                            float g_e = voxel_ptr_e->GetG();
                            float b_e = voxel_ptr_e->GetB();
                            color_ptr[0] =
                                    ((1 - ratio) * r_o + ratio * r_e) / 255.0f;
                            color_ptr[1] =
                                    ((1 - ratio) * g_o + ratio * g_e) / 255.0f;
                            color_ptr[2] =
                                    ((1 - ratio) * b_o + ratio * b_e) / 255.0f;
                        }
                    }
                });
    });

    // Pass 3: connect vertices and form triangles.
    // vertex_count * 3 is an upper-bound allocation; the tensor is sliced to
    // the actual triangle count at the end.
    int triangle_count = vertex_count * 3;
    triangles = core::Tensor({triangle_count, 3}, core::Int64,
                             block_values.GetDevice());
    NDArrayIndexer triangle_indexer(triangles, 1);

#if defined(__CUDACC__)
    count = core::Tensor(std::vector<int>{0}, {}, core::Int32,
                         block_values.GetDevice());
    count_ptr = count.GetDataPtr<int>();
#else
    (*count_ptr) = 0;
#endif
    core::ParallelFor(indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
        // Natural index (0, N) -> (block_idx, voxel_idx)
        int64_t workload_block_idx = widx / resolution3;
        int64_t voxel_idx = widx % resolution3;

        // voxel_idx -> (x_voxel, y_voxel, z_voxel)
        int64_t xv, yv, zv;
        voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

        // Obtain voxel's mesh struct ptr
        int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
                xv, yv, zv, workload_block_idx);

        int table_idx = mesh_struct_ptr[3];
        if (tri_count[table_idx] == 0) return;

        // Walk the triangle table; each group of 3 entries names the edges
        // whose vertices form one triangle (-1 terminates the list).
        for (size_t tri = 0; tri < 16; tri += 3) {
            if (tri_table[table_idx][tri] == -1) return;

            int tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);

            for (size_t vertex = 0; vertex < 3; ++vertex) {
                int edge = tri_table[table_idx][tri + vertex];

                // Resolve the edge to its owner voxel (possibly in a
                // neighbor block) and read back the vertex index that
                // Pass 2 stored there.
                int64_t xv_i = xv + edge_shifts[edge][0];
                int64_t yv_i = yv + edge_shifts[edge][1];
                int64_t zv_i = zv + edge_shifts[edge][2];
                int64_t edge_i = edge_shifts[edge][3];

                int dxb = static_cast<int>(xv_i / resolution);
                int dyb = static_cast<int>(yv_i / resolution);
                int dzb = static_cast<int>(zv_i / resolution);

                int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;

                int64_t block_idx_i =
                        *nb_block_indices_indexer.GetDataPtr<int64_t>(
                                workload_block_idx, nb_idx);
                int* mesh_struct_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
                        xv_i - dxb * resolution, yv_i - dyb * resolution,
                        zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]);

                // Winding order: vertices written in reverse (2 - vertex).
                int64_t* triangle_ptr =
                        triangle_indexer.GetDataPtr<int64_t>(tri_idx);
                triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i];
            }
        }
    });

#if defined(__CUDACC__)
    triangle_count = count.Item<int>();
#else
    triangle_count = (*count_ptr).load();
#endif
    // NOTE(review): uses LogInfo while the other passes use LogDebug --
    // consider aligning verbosity.
    utility::LogInfo("Total triangle count = {}", triangle_count);
    triangles = triangles.Slice(0, 0, triangle_count);
}
#if defined(__CUDACC__)
void EstimateRangeCUDA
#else
void EstimateRangeCPU
#endif
(const core::Tensor& block_keys,
core::Tensor& range_minmax_map,
const core::Tensor& intrinsics,
const core::Tensor& extrinsics,
int h,
int w,
int down_factor,
int64_t block_resolution,
float voxel_size,
float depth_min,
float depth_max) {
// TODO(wei): reserve it in a reusable buffer
// Every 2 channels: (min, max)
int h_down = h / down_factor;
int w_down = w / down_factor;
range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Float32,
block_keys.GetDevice());
NDArrayIndexer range_map_indexer(range_minmax_map, 2);
// Every 6 channels: (v_min, u_min, v_max, u_max, z_min, z_max)
const int fragment_size = 16;
const int frag_buffer_size = 65535;
// TODO(wei): explicit buffer
core::Tensor fragment_buffer = core::Tensor(
{frag_buffer_size, 6}, core::Float32, block_keys.GetDevice());
NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1);
NDArrayIndexer block_keys_indexer(block_keys, 1);
TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);
#if defined(__CUDACC__)
core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
block_keys.GetDevice());
int* count_ptr = count.GetDataPtr<int>();
#else
std::atomic<int> count_atomic(0);
std::atomic<int>* count_ptr = &count_atomic;
#endif
#ifndef __CUDACC__
using std::max;
using std::min;
#endif
// Pass 0: iterate over blocks, fill-in an rendering fragment array
core::ParallelFor(
block_keys.GetDevice(), block_keys.GetLength(),
[=] OPEN3D_DEVICE(int64_t workload_idx) {
int* key = block_keys_indexer.GetDataPtr<int>(workload_idx);
int u_min = w_down - 1, v_min = h_down - 1, u_max = 0,
v_max = 0;
float z_min = depth_max, z_max = depth_min;
float xc, yc, zc, u, v;
// Project 8 corners to low-res image and form a rectangle
for (int i = 0; i < 8; ++i) {
float xw = (key[0] + ((i & 1) > 0)) * block_resolution *
voxel_size;
float yw = (key[1] + ((i & 2) > 0)) * block_resolution *
voxel_size;
float zw = (key[2] + ((i & 4) > 0)) * block_resolution *
voxel_size;
w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc,
&zc);
if (zc <= 0) continue;
// Project to the down sampled image buffer
w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
u /= down_factor;
v /= down_factor;
v_min = min(static_cast<int>(floorf(v)), v_min);
v_max = max(static_cast<int>(ceilf(v)), v_max);
u_min = min(static_cast<int>(floorf(u)), u_min);
u_max = max(static_cast<int>(ceilf(u)), u_max);
z_min = min(z_min, zc);
z_max = max(z_max, zc);
}
v_min = max(0, v_min);
v_max = min(h_down - 1, v_max);
u_min = max(0, u_min);
u_max = min(w_down - 1, u_max);
if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;
// Divide the rectangle into small 16x16 fragments
int frag_v_count =
ceilf(float(v_max - v_min + 1) / float(fragment_size));
int frag_u_count =
ceilf(float(u_max - u_min + 1) / float(fragment_size));
int frag_count = frag_v_count * frag_u_count;
int frag_count_start = OPEN3D_ATOMIC_ADD(count_ptr, 1);
int frag_count_end = frag_count_start + frag_count;
if (frag_count_end >= frag_buffer_size) {
printf("Fragment count exceeding buffer size, abort!\n");
}
int offset = 0;
for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
for (int frag_u = 0; frag_u < frag_u_count;
++frag_u, ++offset) {
float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(
frag_count_start + offset);
// zmin, zmax
frag_ptr[0] = z_min;
frag_ptr[1] = z_max;
// vmin, umin
frag_ptr[2] = v_min + frag_v * fragment_size;
frag_ptr[3] = u_min + frag_u * fragment_size;
// vmax, umax
frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1,
static_cast<float>(v_max));
frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
static_cast<float>(u_max));
}
}
});
#if defined(__CUDACC__)
int frag_count = count[0].Item<int>();
#else
int frag_count = (*count_ptr).load();
#endif
// Pass 0.5: Fill in range map to prepare for atomic min/max
core::ParallelFor(block_keys.GetDevice(), h_down * w_down,
[=] OPEN3D_DEVICE(int64_t workload_idx) {
int v = workload_idx / w_down;
int u = workload_idx % w_down;
float* range_ptr =
range_map_indexer.GetDataPtr<float>(u, v);
range_ptr[0] = depth_max;
range_ptr[1] = depth_min;
});
// Pass 1: iterate over rendering fragment array, fill-in range
core::ParallelFor(
block_keys.GetDevice(), frag_count * fragment_size * fragment_size,
[=] OPEN3D_DEVICE(int64_t workload_idx) {
int frag_idx = workload_idx / (fragment_size * fragment_size);
int local_idx = workload_idx % (fragment_size * fragment_size);
int dv = local_idx / fragment_size;
int du = local_idx % fragment_size;
float* frag_ptr =
frag_buffer_indexer.GetDataPtr<float>(frag_idx);
int v_min = static_cast<int>(frag_ptr[2]);
int u_min = static_cast<int>(frag_ptr[3]);
int v_max = static_cast<int>(frag_ptr[4]);
int u_max = static_cast<int>(frag_ptr[5]);
int v = v_min + dv;
int u = u_min + du;
if (v > v_max || u > u_max) return;
float z_min = frag_ptr[0];
float z_max = frag_ptr[1];
float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
#ifdef __CUDACC__
atomicMinf(&(range_ptr[0]), z_min);
atomicMaxf(&(range_ptr[1]), z_max);
#else
#pragma omp critical(EstimateRangeCPU)
{
range_ptr[0] = min(z_min, range_ptr[0]);
range_ptr[1] = max(z_max, range_ptr[1]);
}
#endif
});
#if defined(__CUDACC__)
core::cuda::Synchronize();
#endif
}
// One-entry memo of the most recent (block coordinate -> buffer index)
// hash-map lookup. A ray tends to stay inside the same voxel block for
// several steps, so caching the last hit skips repeated hashmap queries.
struct BlockCache {
    int x;
    int y;
    int z;
    int block_idx;
    // Returns the cached buffer index when (xin, yin, zin) matches the
    // cached block coordinate, or -1 on a cache miss.
    inline int OPEN3D_DEVICE Check(int xin, int yin, int zin) {
        return (xin == x && yin == y && zin == z) ? block_idx : -1;
    }
    // Replaces the cached entry with a new coordinate/index pair.
    inline void OPEN3D_DEVICE Update(int xin,
                                     int yin,
                                     int zin,
                                     int block_idx_in) {
        x = xin;
        y = yin;
        z = zin;
        block_idx = block_idx_in;
    }
};
// Ray-cast a TSDF voxel-block volume into per-pixel image maps.
// For every pixel a ray is marched from the camera origin between the
// per-pixel depth bounds read from range_map (indexed at x/8, y/8, i.e.
// an 8x down-sampled grid). The first +/- sign change of the TSDF with
// weight >= weight_threshold is taken as the surface; the hit parameter
// is refined by linear interpolation between the bracketing samples.
// Outputs are enabled individually: a map whose tensor is empty
// (GetLength() == 0) is skipped. Color and normals are computed by
// trilinear interpolation over the 8 voxels around the hit point.
// Compiles to RayCastCUDA under NVCC and RayCastCPU otherwise.
#if defined(__CUDACC__)
void RayCastCUDA
#else
void RayCastCPU
#endif
        (std::shared_ptr<core::DeviceHashmap>& hashmap,
         const core::Tensor& block_values,
         const core::Tensor& range_map,
         core::Tensor& vertex_map,
         core::Tensor& depth_map,
         core::Tensor& color_map,
         core::Tensor& normal_map,
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         int h,
         int w,
         int64_t block_resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_min,
         float depth_max,
         float weight_threshold) {
    using Key = core::Block<int, 3>;
    using Hash = core::BlockHash<int, 3>;
    // Select the device-specific hash map backend.
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    auto cuda_hashmap =
            std::dynamic_pointer_cast<core::StdGPUHashmap<Key, Hash>>(hashmap);
    if (cuda_hashmap == nullptr) {
        utility::LogError(
                "Unsupported backend: CUDA raycasting only supports STDGPU.");
    }
    auto hashmap_impl = cuda_hashmap->GetImpl();
#else
    auto cpu_hashmap =
            std::dynamic_pointer_cast<core::TBBHashmap<Key, Hash>>(hashmap);
    auto hashmap_impl = *cpu_hashmap->GetImpl();
#endif
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer range_map_indexer(range_map, 2);
    NDArrayIndexer vertex_map_indexer;
    NDArrayIndexer depth_map_indexer;
    NDArrayIndexer color_map_indexer;
    NDArrayIndexer normal_map_indexer;
    // An output map is enabled iff its tensor is non-empty.
    bool enable_vertex = (vertex_map.GetLength() != 0);
    bool enable_depth = (depth_map.GetLength() != 0);
    bool enable_color = (color_map.GetLength() != 0);
    bool enable_normal = (normal_map.GetLength() != 0);
    if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) {
        utility::LogWarning("No output specified for ray casting, exit.");
        return;
    }
    if (enable_vertex) {
        vertex_map_indexer = NDArrayIndexer(vertex_map, 2);
    }
    if (enable_depth) {
        depth_map_indexer = NDArrayIndexer(depth_map, 2);
    }
    if (enable_color) {
        color_map_indexer = NDArrayIndexer(color_map, 2);
    }
    if (enable_normal) {
        normal_map_indexer = NDArrayIndexer(normal_map, 2);
    }
    // camera-to-world for spawning rays; world-to-camera for writing
    // camera-frame vertices and normals back out.
    TransformIndexer c2w_transform_indexer(
            intrinsics, t::geometry::InverseTransformation(extrinsics));
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);
    int64_t rows = h;
    int64_t cols = w;
    float block_size = voxel_size * block_resolution;
#ifndef __CUDACC__
    using std::max;
#endif
    DISPATCH_BYTESIZE_TO_VOXEL(voxel_block_buffer_indexer.ElementByteSize(), [&]() {
        core::ParallelFor(
                hashmap->GetDevice(), rows * cols,
                [=] OPEN3D_DEVICE(int64_t workload_idx) {
                    // Voxel lookup at integer voxel coords (x_v, y_v, z_v)
                    // relative to block (x_b, y_b, z_b). Coordinates that
                    // fall outside [0, block_resolution) spill into the
                    // adjacent block (hash lookup memoized in `cache`);
                    // returns nullptr when that block is unallocated.
                    auto GetVoxelAtP = [&] OPEN3D_DEVICE(
                                               int x_b, int y_b, int z_b,
                                               int x_v, int y_v, int z_v,
                                               core::addr_t block_addr,
                                               BlockCache& cache) -> voxel_t* {
                        int x_vn = (x_v + block_resolution) % block_resolution;
                        int y_vn = (y_v + block_resolution) % block_resolution;
                        int z_vn = (z_v + block_resolution) % block_resolution;
                        // Sign of the wrap tells which neighbor block to use.
                        int dx_b = Sign(x_v - x_vn);
                        int dy_b = Sign(y_v - y_vn);
                        int dz_b = Sign(z_v - z_vn);
                        if (dx_b == 0 && dy_b == 0 && dz_b == 0) {
                            return voxel_block_buffer_indexer
                                    .GetDataPtr<voxel_t>(x_v, y_v, z_v,
                                                         block_addr);
                        } else {
                            Key key;
                            key.Set(0, x_b + dx_b);
                            key.Set(1, y_b + dy_b);
                            key.Set(2, z_b + dz_b);
                            int block_addr = cache.Check(key.Get(0), key.Get(1),
                                                         key.Get(2));
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return nullptr;
                                block_addr = iter->second;
                                cache.Update(key.Get(0), key.Get(1), key.Get(2),
                                             block_addr);
                            }
                            return voxel_block_buffer_indexer
                                    .GetDataPtr<voxel_t>(x_vn, y_vn, z_vn,
                                                         block_addr);
                        }
                    };
                    // Voxel lookup at ray parameter t along o + t*d in world
                    // coordinates; nullptr when the containing block is not
                    // allocated in the hash map.
                    auto GetVoxelAtT = [&] OPEN3D_DEVICE(
                                               float x_o, float y_o, float z_o,
                                               float x_d, float y_d, float z_d,
                                               float t,
                                               BlockCache& cache) -> voxel_t* {
                        float x_g = x_o + t * x_d;
                        float y_g = y_o + t * y_d;
                        float z_g = z_o + t * z_d;
                        // Block coordinate and look up
                        int x_b = static_cast<int>(floorf(x_g / block_size));
                        int y_b = static_cast<int>(floorf(y_g / block_size));
                        int z_b = static_cast<int>(floorf(z_g / block_size));
                        Key key;
                        key.Set(0, x_b);
                        key.Set(1, y_b);
                        key.Set(2, z_b);
                        int block_addr = cache.Check(x_b, y_b, z_b);
                        if (block_addr < 0) {
                            auto iter = hashmap_impl.find(key);
                            if (iter == hashmap_impl.end()) return nullptr;
                            block_addr = iter->second;
                            cache.Update(x_b, y_b, z_b, block_addr);
                        }
                        // Voxel coordinate and look up
                        int x_v = int((x_g - x_b * block_size) / voxel_size);
                        int y_v = int((y_g - y_b * block_size) / voxel_size);
                        int z_v = int((z_g - z_b * block_size) / voxel_size);
                        return voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                x_v, y_v, z_v, block_addr);
                    };
                    int64_t y = workload_idx / cols;
                    int64_t x = workload_idx % cols;
                    // Zero-initialize every enabled output for this pixel so
                    // misses leave well-defined values.
                    float *depth_ptr = nullptr, *vertex_ptr = nullptr,
                          *normal_ptr = nullptr, *color_ptr = nullptr;
                    if (enable_depth) {
                        depth_ptr = depth_map_indexer.GetDataPtr<float>(x, y);
                        *depth_ptr = 0;
                    }
                    if (enable_vertex) {
                        vertex_ptr = vertex_map_indexer.GetDataPtr<float>(x, y);
                        vertex_ptr[0] = 0;
                        vertex_ptr[1] = 0;
                        vertex_ptr[2] = 0;
                    }
                    if (enable_color) {
                        color_ptr = color_map_indexer.GetDataPtr<float>(x, y);
                        color_ptr[0] = 0;
                        color_ptr[1] = 0;
                        color_ptr[2] = 0;
                    }
                    if (enable_normal) {
                        normal_ptr = normal_map_indexer.GetDataPtr<float>(x, y);
                        normal_ptr[0] = 0;
                        normal_ptr[1] = 0;
                        normal_ptr[2] = 0;
                    }
                    // Per-pixel march interval from the low-res range map
                    // (8x down-sampled; see EstimateRange above).
                    const float* range =
                            range_map_indexer.GetDataPtr<float>(x / 8, y / 8);
                    float t = range[0];
                    const float t_max = range[1];
                    if (t >= t_max) return;
                    // Coordinates in camera and global
                    float x_c = 0, y_c = 0, z_c = 0;
                    float x_g = 0, y_g = 0, z_g = 0;
                    float x_o = 0, y_o = 0, z_o = 0;
                    // Iterative ray intersection check
                    float t_prev = t;
                    float tsdf_prev = -1.0f;
                    float tsdf = 1.0;
                    float w = 0.0;
                    // Camera origin
                    c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o,
                                                         &z_o);
                    // Direction
                    c2w_transform_indexer.Unproject(static_cast<float>(x),
                                                    static_cast<float>(y), 1.0f,
                                                    &x_c, &y_c, &z_c);
                    c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g,
                                                         &y_g, &z_g);
                    float x_d = (x_g - x_o);
                    float y_d = (y_g - y_o);
                    float z_d = (z_g - z_o);
                    BlockCache cache{0, 0, 0, -1};
                    bool surface_found = false;
                    // March: skip a whole block when unallocated, otherwise
                    // advance by max(tsdf * sdf_trunc, voxel_size) until the
                    // TSDF crosses from + to - with sufficient weight.
                    while (t < t_max) {
                        voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d,
                                                         y_d, z_d, t, cache);
                        if (!voxel_ptr) {
                            t_prev = t;
                            t += block_size;
                        } else {
                            tsdf_prev = tsdf;
                            tsdf = voxel_ptr->GetTSDF();
                            w = voxel_ptr->GetWeight();
                            if (tsdf_prev > 0 && w >= weight_threshold &&
                                tsdf <= 0) {
                                surface_found = true;
                                break;
                            }
                            t_prev = t;
                            float delta = tsdf * sdf_trunc;
                            t += delta < voxel_size ? voxel_size : delta;
                        }
                    }
                    if (surface_found) {
                        // Linear interpolation of t at the TSDF zero crossing.
                        float t_intersect = (t * tsdf_prev - t_prev * tsdf) /
                                            (tsdf_prev - tsdf);
                        x_g = x_o + t_intersect * x_d;
                        y_g = y_o + t_intersect * y_d;
                        z_g = z_o + t_intersect * z_d;
                        // Trivial vertex assignment
                        if (enable_depth) {
                            *depth_ptr = t_intersect * depth_scale;
                        }
                        if (enable_vertex) {
                            w2c_transform_indexer.RigidTransform(
                                    x_g, y_g, z_g, vertex_ptr + 0,
                                    vertex_ptr + 1, vertex_ptr + 2);
                        }
                        // Trilinear interpolation
                        // TODO(wei): simplify the flow by splitting the
                        // functions given what is enabled
                        if (enable_color || enable_normal) {
                            int x_b =
                                    static_cast<int>(floorf(x_g / block_size));
                            int y_b =
                                    static_cast<int>(floorf(y_g / block_size));
                            int z_b =
                                    static_cast<int>(floorf(z_g / block_size));
                            float x_v = (x_g - float(x_b) * block_size) /
                                        voxel_size;
                            float y_v = (y_g - float(y_b) * block_size) /
                                        voxel_size;
                            float z_v = (z_g - float(z_b) * block_size) /
                                        voxel_size;
                            Key key;
                            key.Set(0, x_b);
                            key.Set(1, y_b);
                            key.Set(2, z_b);
                            int block_addr = cache.Check(x_b, y_b, z_b);
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return;
                                block_addr = iter->second;
                                cache.Update(x_b, y_b, z_b, block_addr);
                            }
                            int x_v_floor = static_cast<int>(floorf(x_v));
                            int y_v_floor = static_cast<int>(floorf(y_v));
                            int z_v_floor = static_cast<int>(floorf(z_v));
                            float ratio_x = x_v - float(x_v_floor);
                            float ratio_y = y_v - float(y_v_floor);
                            float ratio_z = z_v - float(z_v_floor);
                            float sum_weight_color = 0.0;
                            float sum_weight_normal = 0.0;
                            // Accumulate color and (central-difference)
                            // normal over the 8 corner voxels; weights of
                            // missing/zero-weight voxels are dropped and
                            // renormalized below.
                            for (int k = 0; k < 8; ++k) {
                                int dx_v = (k & 1) > 0 ? 1 : 0;
                                int dy_v = (k & 2) > 0 ? 1 : 0;
                                int dz_v = (k & 4) > 0 ? 1 : 0;
                                float ratio = (dx_v * (ratio_x) +
                                               (1 - dx_v) * (1 - ratio_x)) *
                                              (dy_v * (ratio_y) +
                                               (1 - dy_v) * (1 - ratio_y)) *
                                              (dz_v * (ratio_z) +
                                               (1 - dz_v) * (1 - ratio_z));
                                voxel_t* voxel_ptr_k = GetVoxelAtP(
                                        x_b, y_b, z_b, x_v_floor + dx_v,
                                        y_v_floor + dy_v, z_v_floor + dz_v,
                                        block_addr, cache);
                                if (enable_color && voxel_ptr_k &&
                                    voxel_ptr_k->GetWeight() > 0) {
                                    sum_weight_color += ratio;
                                    color_ptr[0] += ratio * voxel_ptr_k->GetR();
                                    color_ptr[1] += ratio * voxel_ptr_k->GetG();
                                    color_ptr[2] += ratio * voxel_ptr_k->GetB();
                                }
                                if (enable_normal) {
                                    for (int dim = 0; dim < 3; ++dim) {
                                        voxel_t* voxel_ptr_k_plus = GetVoxelAtP(
                                                x_b, y_b, z_b,
                                                x_v_floor + dx_v + (dim == 0),
                                                y_v_floor + dy_v + (dim == 1),
                                                z_v_floor + dz_v + (dim == 2),
                                                block_addr, cache);
                                        voxel_t* voxel_ptr_k_minus =
                                                GetVoxelAtP(x_b, y_b, z_b,
                                                            x_v_floor + dx_v -
                                                                    (dim == 0),
                                                            y_v_floor + dy_v -
                                                                    (dim == 1),
                                                            z_v_floor + dz_v -
                                                                    (dim == 2),
                                                            block_addr, cache);
                                        bool valid = false;
                                        if (voxel_ptr_k_plus &&
                                            voxel_ptr_k_plus->GetWeight() > 0) {
                                            normal_ptr[dim] +=
                                                    ratio *
                                                    voxel_ptr_k_plus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }
                                        if (voxel_ptr_k_minus &&
                                            voxel_ptr_k_minus->GetWeight() >
                                                    0) {
                                            normal_ptr[dim] -=
                                                    ratio *
                                                    voxel_ptr_k_minus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }
                                        sum_weight_normal += valid ? ratio : 0;
                                    }
                                }  // if (enable_normal)
                            }      // loop over 8 neighbors
                            if (enable_color && sum_weight_color > 0) {
                                sum_weight_color *= 255.0;
                                color_ptr[0] /= sum_weight_color;
                                color_ptr[1] /= sum_weight_color;
                                color_ptr[2] /= sum_weight_color;
                            }
                            if (enable_normal && sum_weight_normal > 0) {
                                normal_ptr[0] /= sum_weight_normal;
                                normal_ptr[1] /= sum_weight_normal;
                                normal_ptr[2] /= sum_weight_normal;
                                float norm =
                                        sqrt(normal_ptr[0] * normal_ptr[0] +
                                             normal_ptr[1] * normal_ptr[1] +
                                             normal_ptr[2] * normal_ptr[2]);
                                // Normals are rotated into the camera frame.
                                w2c_transform_indexer.Rotate(
                                        normal_ptr[0] / norm,
                                        normal_ptr[1] / norm,
                                        normal_ptr[2] / norm, normal_ptr + 0,
                                        normal_ptr + 1, normal_ptr + 2);
                            }
                        }  // if (color or normal)
                    }      // if (tsdf < 0)
                });
    });
#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
} // namespace tsdf
} // namespace kernel
} // namespace geometry
} // namespace t
} // namespace open3d
|
meanshift.h | #ifndef MEAN_SHIFT_H
#define MEAN_SHIFT_H
#include <algorithm>
#include <cmath>
#include "container.h"
#include "container_io.h"
#include <iostream>
#include "utils.h"
//#include <chrono>
namespace mean_shift {
namespace omp {
// Mean-shift clustering, OpenMP variant with per-point convergence.
// Each of the N D-dimensional rows of `data` is iteratively shifted to
// the Gaussian-weighted mean (kernel exp(-d / (2*bandwidth^2))) of all
// points within `radius`, until its displacement falls below `eps` or
// `niter` iterations have run. Converged points are offered as
// centroids; is_centroid() presumably rejects candidates closer than
// min_distance to an existing centroid -- verify against utils.h.
// NOTE: `data` is modified in place.
template <typename T, const size_t N, const size_t D>
std::vector<vec<T, D>> cluster_points(mat<T, N, D>& data,
                                      const size_t niter,
                                      const float bandwidth,
                                      const float radius,
                                      const float min_distance,
                                      const double eps) {
    // 2*h^2: denominator of the Gaussian kernel below.
    const float double_sqr_bdw = 2 * bandwidth * bandwidth;
    vec<bool, N> has_stopped {false};
    std::vector<vec<T, D>> centroids;
    mat<T, N, D> new_data;
    for (size_t i = 0; i < niter; ++i) {
#pragma omp parallel for default(none) \
shared(data, niter, bandwidth, eps, radius, double_sqr_bdw, has_stopped, centroids, new_data, min_distance) \
schedule(dynamic)
        for (size_t p = 0; p < N; ++p) {
            // A point that already converged no longer moves; instead
            // (re-)offer its current position as a centroid. `centroids`
            // is shared, hence the critical section.
            if (has_stopped[p]) {
#pragma omp critical
                {
                    if ((centroids.size() == 0) || (is_centroid(centroids, data[p], min_distance))) {
                        centroids.emplace_back(data[p]);
                    }
                }
                continue;
            }
            // Gaussian-weighted mean of all neighbours within `radius`.
            vec<T, D> new_position {};
            float sum_weights = 0.;
            for (size_t q = 0; q < N; ++q) {
                double dist = calc_distance(data[p], data[q]);
                if (dist <= radius) {
                    float gaussian = std::exp(- dist / double_sqr_bdw);
                    new_position = new_position + data[q] * gaussian;
                    sum_weights += gaussian;
                }
            }
            new_position = new_position / sum_weights;
            double shift = calc_distance(data[p], new_position);
            if (shift <= eps) {
                // Flag is written only by the thread owning index p in this
                // iteration, but read again in later iterations.
#pragma omp atomic write
                has_stopped[p] = true;
            }
            // NOTE(review): each thread writes a distinct row p; this
            // critical section looks stricter than necessary.
#pragma omp critical
            new_data[p] = new_position;
        }
        data = new_data;
        // Early exit once every point has converged.
        if (std::all_of(has_stopped.begin(), has_stopped.end(), [](bool b) {return b;})) {
            std::cout << "With eps = " << eps << " took " << i << " iterations!\n";
            return centroids;
        }
    }
    return centroids;
}
// Mean-shift clustering, fixed-iteration OpenMP variant.
// Runs exactly `niter` shift passes -- no per-point convergence test --
// then collapses the shifted points into centroids via
// reduce_to_centroids() with `min_distance` as the merge threshold.
// NOTE: `data` is modified in place.
template <typename T, const size_t N, const size_t D>
std::vector<vec<T, D>> cluster_points(mat<T, N, D>& data,
                                      const size_t niter,
                                      const float bandwidth,
                                      const float radius,
                                      const float min_distance) {
    // 2*h^2: denominator of the Gaussian kernel below.
    const float double_sqr_bdw = 2 * bandwidth * bandwidth;
    mat<T, N, D> new_data;
    for (size_t i = 0; i < niter; ++i) {
#pragma omp parallel for default(none) \
shared(data, niter, bandwidth, radius, double_sqr_bdw, new_data) \
schedule(dynamic)
        for (size_t p = 0; p < N; ++p) {
            // Gaussian-weighted mean of all neighbours within `radius`.
            vec<T, D> new_position {};
            float sum_weights = 0.;
            for (size_t q = 0; q < N; ++q) {
                double dist = calc_distance(data[p], data[q]);
                if (dist <= radius) {
                    float gaussian = std::exp(- dist / double_sqr_bdw);
                    new_position = new_position + data[q] * gaussian;
                    sum_weights += gaussian;
                }
            }
            // NOTE(review): rows of new_data are written by distinct
            // threads only; this critical section looks redundant.
#pragma omp critical
            new_data[p] = new_position / sum_weights;
        }
        data = new_data;
    }
    return reduce_to_centroids(data, min_distance);
}
} // namespace omp
} // namespace mean_shift
#endif |
declare-target-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
int foo (void), bar (void);
extern int a;
int b;
char d;
#pragma omp declare target
long c;
#pragma omp end declare target
#pragma omp declare target (bar, a)
#pragma omp declare target to (b) link (d) to (foo)
|
bd_omp_myc.c | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h> // access
#include <math.h>
#include <assert.h>
#include "timer.h"
#include "bd.h"
#include <omp.h>
#include <mkl.h>
#include "matrix.h"
#define NTHREADS 1
#define M_PI 3.14159265358979323846
#define my_EPS 0.000000001
// Debug helper: dump the first 5 rows of an n-column row-major matrix,
// one blank line between rows. Intentionally truncated to 5 rows.
void print_matrix(double *a, int n){
    const int rows_to_show = 5;
    for (int row = 0; row < rows_to_show; ++row) {
        for (int col = 0; col < n; ++col) {
            printf("%lf ", a[row * n + col]);
        }
        printf("\n\n");
    }
    return;
}
// Debug helper: print n doubles separated by spaces, then a newline.
void print_array(double *a, int n){
    int pos = 0;
    while (pos < n) {
        printf("%lf ", a[pos]);
        ++pos;
    }
    printf("\n");
    return;
}
//********************************CHOLESKY*****************************************
// Print the full n x n matrix A with 5 decimal places, one row per line.
void show_matrix(double *A, int n) {
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            printf("%2.5f ", A[row * n + col]);
        }
        printf("\n");
    }
}
// In-place Cholesky factorization (Cholesky-Banachiewicz, column by
// column) of the symmetric positive-definite n x n matrix stored in L.
// On return, L holds the lower-triangular factor (upper triangle is
// zeroed) and the same pointer is returned for convenience.
//
// Fixes over the previous version:
//  * The diagonal update read an *uninitialized* `i` (`for (k = 0; k < i;...)`
//    and `L[i*n+i] = sqrt(L[j*n+j])`) because the zeroing loop declared its
//    own `int i` -- both uses must refer to column `j`.
//  * omp_destroy_lock() was called inside the column loop, destroying the
//    lock that later iterations still set (UB). The lock is now unnecessary:
//    the serialized accumulation is expressed as an OpenMP reduction.
// Precondition: L is SPD; otherwise sqrt() of a negative value yields NaN.
double * cholOMP(double * L, int n) {
    for (int j = 0; j < n; j++) {
        // Zero the strictly-upper part of column j.
        for (int i = 0; i < j; i++) {
            L[i*n+j] = 0;
        }
        // Diagonal: L[j][j] = sqrt(A[j][j] - sum_k L[j][k]^2).
        double sum = 0.0;
#pragma omp parallel for reduction(+:sum)
        for (int k = 0; k < j; k++) {
            sum += L[j*n+k] * L[j*n+k];
        }
        L[j*n+j] = sqrt(L[j*n+j] - sum);
        // Sub-diagonal entries of column j; rows are independent.
#pragma omp parallel for
        for (int i = j+1; i < n; i++) {
            for (int k = 0; k < j; k++) {
                L[i*n+j] = L[i*n+j] - L[i*n+k] * L[j*n+k];
            }
            L[i*n+j] = L[i*n+j] / L[j*n+j];
        }
    }
    return L;
}
// Dot product of the first n entries of x and y.
double mul_sum(double *x, double *y, int n) {
    double acc = 0;
    int idx = 0;
    while (idx < n) {
        acc += x[idx] * y[idx];
        ++idx;
    }
    return acc;
}
// In-place transpose of an n x n row-major matrix of doubles.
// Fix: the swap temporary was declared `int`, silently truncating the
// fractional part of every element moved through it; it must be double.
void my_transpose(double *A, int n){// A = n*n
    double temp;
    for(int i = 0 ; i < n ; i++){
        for (int j = i+1; j < n ; j++){
            temp = A[i*n+j];
            A[i*n+j] = A[j*n+i];
            A[j*n+i] = temp;
        }
    }
    return ;
}
// Cholesky factorization of the SPD matrix X (row-major, n x n) into a
// freshly calloc'd buffer. The factor is built lower-triangular row by
// row, then transposed in place, so the returned matrix is
// upper-triangular. X is left untouched; caller owns the result.
double *cho2(double *X, int n) {
    double *fac = (double*)calloc(n*n, sizeof(double));
    for (int col = 0; col < n; col++) {
        // Diagonal entry: sqrt(X[col][col] - <row col, row col>).
        const double diag_dot = mul_sum(&fac[col * n], &fac[col * n], col);
        fac[col * n + col] = sqrt(X[col * n + col] - diag_dot);
        // Entries below the diagonal are independent across rows.
#pragma omp parallel for schedule(static)
        for (int row = col + 1; row < n; row++) {
            const double dot = mul_sum(&fac[col * n], &fac[row * n], col);
            fac[row * n + col] = (X[row*n+col]-dot)/ fac[col * n + col] ;
        }
    }
    my_transpose(fac, n);
    return fac;
}
//****************************** RPY_EWALD part *****************************************************
// Real-space scalar mobilities of the RPY Ewald sum for one pair at
// separation r with splitting parameter xi. *m11 multiplies the
// identity tensor and *m12 the outer product of the (caller-normalized)
// separation vector. `a3` carries the pair radius term -- the caller
// passes 0.5*(rad_i^2 + rad_j^2) -- while the bare radius `a` is fixed
// at 1 here.
// NOTE(review): left byte-identical; the grouping of the polynomial
// terms fixes the floating-point result and must not be reordered.
inline void scalar_rpy_ewald_real(double r, double xi, double a3, double *m11, double *m12)
{
    double a = 1.;
    double xi2 = xi*xi;
    double xi3 = xi2*xi;
    double xi5 = xi3*xi2;
    double xi7 = xi5*xi2;
    double r2 = r*r;
    double r4 = r2*r2;
    double ri = 1./r;
    double ri2 = ri*ri;
    double ri3 = ri*ri2;
    double erfc_xi_r = erfc(xi*r);
    // Shared screened-Gaussian factor exp(-xi^2 r^2) / sqrt(pi).
    double pi_exp = 1./sqrt(M_PI) * exp(-xi2*r2);
    *m11 = (0.75*a*ri + 0.5*a3*ri3)*erfc_xi_r + ( 4*xi7*a3*r4 + 3*xi3*a*r2 - 20*xi5*a3*r2 - 4.5*xi*a + 14*xi3*a3 + xi*a3*ri2)*pi_exp;
    *m12 = (0.75*a*ri - 1.5*a3*ri3)*erfc_xi_r + (-4*xi7*a3*r4 - 3*xi3*a*r2 + 16*xi5*a3*r2 + 1.5*xi*a - 2*xi3*a3 - 3*xi*a3*ri2)*pi_exp;
}
// Reciprocal-space (k-space) scalar coefficient of the RPY Ewald sum
// for wavenumber k and splitting parameter xi, written to *m2.
// Fix: removed the unused locals `a` and `a3` (the radius correction is
// applied by the caller via its own a3*k*k/3 factor, see rpy_ewald).
inline void scalar_rpy_ewald_recip(double k, double xi, double *m2)
{
    double k2 = k*k;
    double xii2k2 = k2/(xi*xi);  // (k/xi)^2
    *m2 = (1. + 0.25*xii2k2 + 0.125*xii2k2*xii2k2) * 6.*M_PI/k2 * exp(-0.25*xii2k2);
}
// note: positions must be wrapped inside the box [0,L]
// Build the 3np x 3np RPY (Rotne-Prager-Yamakawa) Ewald mobility matrix
// into `a` for np particles at `pos` with radii `rad` in a cubic
// periodic box of side L. xi is the Ewald splitting parameter; nr and
// nk bound the real- and reciprocal-space image sums. Only one triangle
// of each 3x3 pair block is written (a comment below says the caller
// symmetrizes, e.g. in Matlab); self mobilities are folded into the
// diagonal at the end. Returns 0.
// NOTE(review): the stack arrays below are sized by the VSIZE macro,
// which hard-codes nk = 6, while the loops run over the runtime-derived
// `vsize` -- this overflows if nk > 6; confirm callers never exceed 6.
// __declspec(align(64)) and `restrict` usage here are ICC/MSVC-flavored
// and not portable C++.
int rpy_ewald(int np, double * restrict a, const double * restrict pos, double L, const double * restrict rad, double xi, int nr, int nk)
{
    // printf("Inside function rpy_ewald\n");
    __declspec(align(64)) double rvec[8];
    __declspec(align(64)) double rvec0[8];
    __declspec(align(64)) double temp[8];
    double a3;
    double m11, m12, m2;
    double eye3_coef;
    double r2, r;
    int x, y, z;
    int i, j;
    double *ap0, *ap;
    // Number of reciprocal lattice points actually used (half grid, by
    // symmetry, excluding k = 0).
    int vsize = ((2*nk+1)*(2*nk+1)*(2*nk+1) - 1) / 2;
#define VSIZE ((2*6+1)*(2*6+1)*(2*6+1) - 1) / 2
    // int A_VSIZE = ceil(VSIZE/8.0)*8;
    // int K_VSIZE = ceil(3*VSIZE/8.0)*8;
    // printf("check vsize=%d\n", A_VSIZE);
    __declspec(align(64)) double k_array[VSIZE];//1104
    __declspec(align(64)) double m2_array[VSIZE];//1104
    __declspec(align(64)) double kvec_array[3*VSIZE];//3296
    int ind;
    __declspec(align(64)) double kvec[8];
    double k;
    double t;
    double vinv = 1./(L*L*L);  // inverse box volume
    double time0, time1;
    double time0_real, time1_real;
    double time0_recip, time1_recip;
    // INDICES for converting for loops
    int _b, _index, ib, ib2;
    // *************************************************************************
    // // compute and save coefficients for reciprocal-space sum
    // // Due to symmetry, only need half of the grid points
    ind = 0;
    _b = (2*nk+1);
    for (_index =0 ;_index < (_b*_b*_b -1)/2; _index++){// Using indices x,y,z are recalculated
        // Decode the flat index into grid offsets in [-nk, nk].
        z = _index%(_b)-nk;// adjusting the indices
        x = (_index-_index%(_b*_b))/(_b*_b)-nk;
        y = (_index%(_b*_b)-_index%(_b))/_b-nk;
        k_array[ind] = 2.*M_PI/L*sqrt((double)(x*x + y*y + z*z));
        scalar_rpy_ewald_recip(k_array[ind], xi, &m2_array[ind]);
        kvec_array[3*ind  ] = 2.*M_PI/L*x;
        kvec_array[3*ind+1] = 2.*M_PI/L*y;
        kvec_array[3*ind+2] = 2.*M_PI/L*z;
        ind++;
    }
    // Real-space sum over all unordered pairs (i < j). The flat pair
    // index _index1 is decoded into (i, j) so the pair loop can be a
    // single parallel for.
#pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, ap, ap0, _b, temp, eye3_coef, _index, rvec0, rvec, x, y, z, r, r2, m11, m12, a3 )
    for (int _index1 = np*(np-1)/2-1; _index1>=0; _index1--){
        i = np-1-(int)((1+sqrt(8*_index1+1))/2);
        j = np-1-_index1 + (int)((1+sqrt(8*_index1+1))/2)*((int)((1+sqrt(8*_index1+1))/2)-1)/2;
        // temp[0..5] accumulates the 6 unique entries of the symmetric
        // 3x3 pair tensor; eye3_coef accumulates the isotropic part.
        temp[0] = 0.;
        temp[1] = 0.; temp[3] = 0.;
        temp[2] = 0.; temp[4] = 0.; temp[5] = 0.;
        eye3_coef = 0.;
        rvec0[0] = pos[3*i] - pos[3*j];
        rvec0[1] = pos[3*i+1] - pos[3*j+1];
        rvec0[2] = pos[3*i+2] - pos[3*j+2];
        a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
        _b = (2*nr+1);
        //shared(eye3_coef, temp, rvec0, L, xi, a3, m11, m12, _b, xi3, xi5, xi7, xi)
        //// #pragma omp parallel for schedule(static) private(rvec, x, y, z, r, r2, m11, m12) shared(eye3_coef, temp, rvec0, a3)
        // Sum over periodic images in [-nr, nr]^3.
        for (_index =0 ;_index < _b*_b*_b; _index++){
            z =_index%(_b)-nr;// adjusting the indices
            x = (_index-_index%(_b*_b))/(_b*_b)-nr;
            y = (_index%(_b*_b)-_index%(_b))/_b-nr;
            rvec[0] = rvec0[0] + x*L;
            rvec[1] = rvec0[1] + y*L;
            rvec[2] = rvec0[2] + z*L;
            // compute norm
            r2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
            r = sqrt(r2);
            // Normalize so temp accumulates unit outer products.
            rvec[0] /= r;
            rvec[1] /= r;
            rvec[2] /= r;
            scalar_rpy_ewald_real(r, xi, a3, &m11, &m12);
            eye3_coef += m11;
            temp[0] += m12 * rvec[0] * rvec[0];
            temp[1] += m12 * rvec[0] * rvec[1];
            temp[2] += m12 * rvec[0] * rvec[2];
            temp[3] += m12 * rvec[1] * rvec[1];
            temp[4] += m12 * rvec[1] * rvec[2];
            temp[5] += m12 * rvec[2] * rvec[2];
        }
        // add contribution to eye3 term
        temp[0] += eye3_coef;
        temp[3] += eye3_coef;
        temp[5] += eye3_coef;
        // sum into global matrix (only lower-triangular part)
        // // Use matlab to add transpose
        ap0 = &a[np*3*3*i + 3*j];
        ap = ap0;
        *ap++ = temp[0];
        *ap++ = temp[1];
        *ap = temp[2];
        ap = ap0+np*3;  // next row of the 3x3 block (row stride 3*np)
        *ap++ = temp[1];
        *ap++ = temp[3];
        *ap = temp[4];
        ap = ap0+np*3+np*3;
        *ap++ = temp[2];
        *ap++ = temp[4];
        *ap = temp[5];
    }
    // reciprocal-space sum
    // Runs over i <= j (diagonal blocks included), adding on top of the
    // real-space values already stored in `a`.
#pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, temp, ap, ap0, ind, rvec, kvec, k, m2, t, a3)
    for (_index = np*(np+1)/2-1; _index>=0; _index--){
        i = np-1-(int)((-1+sqrt(8*_index+1))/2);
        j = np-1-_index + (int)((-1+sqrt(8*_index+1))/2)*((int)((-1+sqrt(8*_index+1))/2)+1)/2;
        rvec[0] = pos[3*i+0] - pos[3*j];
        rvec[1] = pos[3*i+1] - pos[3*j+1];
        rvec[2] = pos[3*i+2] - pos[3*j+2];
        temp[0] = 0.;
        temp[1] = 0.; temp[3] = 0.;
        temp[2] = 0.; temp[4] = 0.; temp[5] = 0.;
        a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
        for (ind=0; ind<vsize; ind++)
        {
            k = k_array[ind];
            m2 = m2_array[ind];
            kvec[0] = kvec_array[3*ind  ];
            kvec[1] = kvec_array[3*ind+1];
            kvec[2] = kvec_array[3*ind+2];
            // Factor 2: each stored k point represents itself and -k.
            t = 2.*vinv*m2*cos(kvec[0]*rvec[0] + kvec[1]*rvec[1] + kvec[2]*rvec[2])*(1.-a3*k*k/3.);
            kvec[0] /= k;
            kvec[1] /= k;
            kvec[2] /= k;
            // Projection I - k_hat k_hat^T (incompressibility).
            temp[0] += t * (1. - kvec[0]*kvec[0]);
            temp[1] += t * - kvec[0]*kvec[1];
            temp[2] += t * - kvec[0]*kvec[2];
            temp[3] += t * (1. - kvec[1]*kvec[1]);
            temp[4] += t * - kvec[1]*kvec[2];
            temp[5] += t * (1. - kvec[2]*kvec[2]);
        }
        // sum into matrix
        // // sum with existing values
        ap0 = &a[np*3*3*i + 3*j];
        ap = ap0;
        *ap++ += temp[0];
        *ap++ += temp[1];
        *ap += temp[2];
        ap = ap0+np*3;
        *ap++ += temp[1];
        *ap++ += temp[3];// diagonal element
        *ap += temp[4];
        ap = ap0+np*3+np*3;
        *ap++ += temp[2];
        *ap++ += temp[4];
        *ap += temp[5];// diagonal element
    }
    // self-part
    for (i=0; i<np; i++)// adding some term to diagonal
    {
        // Single-particle self mobility minus the Ewald self-interaction
        // correction for splitting parameter xi.
        t = 1./rad[i] - (6. - 40./3.*xi*xi*rad[i]*rad[i])*xi/sqrt(M_PI);
        t *= 0.5;
        for (j=0; j<3; j++)
        {
            ind = 3*i+j;
            a[ind*np*3+ind] = a[ind*np*3+ind]*0.5+t;// taking care of (i==j) condition
        }
    }
    return 0;
}
//**************************************************************************************************
// Decompose a flat index into 3-D cell coordinates of a b*b*b grid.
// Layout: index = (*i)*b*b + (*j)*b + (*k), with *k varying fastest.
void get_indices(int index, int *i, int *j, int *k, int b){
    const int rem_b  = index % b;        // fastest-varying coordinate
    const int rem_b2 = index % (b * b);  // remainder within one i-slab
    *k = rem_b;
    *j = (rem_b2 - rem_b) / b;
    *i = (index - rem_b2) / (b * b);
    return;
}
// One cell of the uniform spatial grid used for neighbor search:
// `head` is the index of the first particle in the cell's implied
// linked list (chained through next[]), or -1 when the cell is empty.
struct box
{
    int head;
};
// it is possible to use smaller boxes and more complex neighbor patterns
// Half-shell of cell offsets: 13 of the 26 surrounding cells plus the
// cell itself (last entry). Visiting only this half ensures every cell
// pair is processed exactly once when each cell walks the list.
#define NUM_BOX_NEIGHBORS 14
int box_neighbors[NUM_BOX_NEIGHBORS][3] =
{
    {-1,-1,-1},
    {-1,-1, 0},
    {-1,-1,+1},
    {-1, 0,-1},
    {-1, 0, 0},
    {-1, 0,+1},
    {-1,+1,-1},
    {-1,+1, 0},
    {-1,+1,+1},
    { 0,-1,-1},
    { 0,-1, 0},
    { 0,-1,+1},
    { 0, 0,-1},
    { 0, 0, 0} // will calculate within the box interactions
};
/*
// CHECK RPY*************
int gold_read(const char *filename, int npos, double *gold)
{
int npos_read;
FILE *fp = fopen(filename, "r");
assert(fp);
fscanf(fp, "%d\n", &npos_read);
char label[100];
fgets(label, 100, fp);
assert(npos == npos_read);
for (int i=0; i<3*npos; i++) {
for (int j=0; j<3*npos; j++) {
fscanf(fp, "%lf\n", &gold[i*(3*npos) + j]);
}
}
fclose(fp);
return 0;
}
double compare_gold(int npos, double *a,double *gold) {
double err = 0.0;
printf("a = %lf\n", a[3]);
printf("gold = %lf\n", gold[3]);
for (int i=0; i<npos; i++) {
for (int j=0; j<npos; j++) {
double diff = a[i*(npos*3) + j] - gold[i*(npos*3) +j];
err += diff*diff;
// if(err>0){printf("error at position: i=%d j=%d and err = %lf\n", i, j, err);}
// printf("error at position: i=%d j=%d and err = %lf\n", i, j, err);
}
}
return err;
}
// **********************
*/
int bd(int npos, double * restrict pos_orig, double * restrict buf, const int *types, double L, double * restrict pos, int* restrict next, double* restrict forces, double f_const, double * restrict au, double * restrict rad, double xi, int nr, int nk, double * restrict hd_vec)
{
/*
//************************** CHECK RPY part ***************************************************
printf("npos = %d, L= %lf\n", npos, L);
char *gold_filename = "gold.dat";
double *gold = (double *) _mm_malloc((3*npos) * (3*npos) * sizeof(double), 64);
if (access(gold_filename, F_OK) == -1) {
printf("[WARNING] Unable to access gold file \"%s\"; comparison will not proceed.\n", gold_filename);
} else {
gold_read(gold_filename, npos, gold);
}
rpy_ewald(npos, au, pos_orig, L, rad, xi, nr, nk);// DELETE after testing
double error = compare_gold(npos, au, gold);
printf("Squared Error: %f\n", error);
return 500;
//*********************************************************************************************
*/
// Initialisations required for INTERACTION FUNCTION******** NOTE: Can take input to bd itself!!!
double krepul = 100, a=1, a_sq, phi=0.2, f;
a_sq = a*a;
int boxdim;// boxdim is number of cells in L
double cutoff2; int numpairs_p;
cutoff2 = 4;// cutoff < L/boxdim
boxdim =(int)(L/cutoff2)*a;//(int)(L/cutoff2*0.8);
printf("L = %lf cutoff2 = %lf boxdim = %d\n", L, cutoff2, boxdim);
struct box b[boxdim][boxdim][boxdim];
struct box *bp;
struct box *neigh_bp;
// box indices
int idx, idy, idz, index, box2, ib2;
int neigh_idx, neigh_idy, neigh_idz;
// allocate implied linked list
int p1, p2, j, i;
double d2, dx, dy, dz, s;
box2 = boxdim*boxdim;
//*****************************************END initialisations***********************************
if (boxdim < 4 || cutoff2 > (L/boxdim)*(L/boxdim))
{
printf("interactions: bad input parameters\n");
// return 1;
}
double t0, t_init_cells = 0, t_assign_to_cells=0, t_update_pos=0, t_force=0, t_hd = 0, t_cho = 0;
for (int step=0; step<INTERVAL_LEN; step++)
{
// printf("step = %d\n", step);
// Calculation of interaction per time step
t0 = time_in_seconds();
// allocate memory for particles in each box
// #pragma omp parallel for schedule(static) private(idx, idy, idz, ib2) shared(b, boxdim, box2)
// for (index=0; index<boxdim*box2; index++){
// idz = index%(boxdim);
// ib2 = index%(box2);
// idx = (index-ib2)/(box2);
// idy = (ib2-idz)/boxdim;
// b[idx][idy][idz].head=-1;
// }
for (idx=0; idx<boxdim; idx++){
for (idy=0; idy<boxdim; idy++){
for (idz=0; idz<boxdim; idz++){
b[idx][idy][idz].head=-1;
}
}
}
t_init_cells += time_in_seconds()-t0;
t0 = time_in_seconds();
// traverse all particles and assign to boxes
// #pragma omp parallel for schedule(static) private(i, idx, idy, idz, bp) shared(b, next) num_threads(NTHREADS)
for (i=0; i<npos; i++)
{
if (pos_orig[3*i] >= 0){pos[3*i]= fmod(pos_orig[3*i], L);}// OR SINCE PARTICLES moving slowly.. change to -L
else {// pos_orig[i] is negative
pos[3*i] = L-fmod(-1*pos_orig[3*i], L);
}
if (pos_orig[3*i+1] >= 0){pos[3*i+1]= fmod(pos_orig[3*i+1], L);}// OR SINCE PARTICLES moving slowly.. change to -L
else {// pos_orig[i] is negative
pos[3*i+1] = L-fmod(-1*pos_orig[3*i+1], L);
}
if (pos_orig[3*i+2] >= 0){pos[3*i+2]= fmod(pos_orig[3*i+2], L);}// OR SINCE PARTICLES moving slowly.. change to -L
else {// pos_orig[i] is negative
pos[3*i+2] = L-fmod(-1*pos_orig[3*i+2], L);
}
if (pos[3*i]<0){printf("pos_orig = %lf pos defect = %lf and i = %d and L =%lf\n", pos_orig[3*i], pos[3*i], i, L);}
// initialize entry of implied linked list
next[i] = -1;
forces[3*i+0] = 0; forces[3*i+1] = 0; forces[3*i+2] = 0; // re-initialising interaction forces at each time step
// which box does the particle belong to?
// assumes particles have positions within [0,L]^3
idx = (int)(pos[3*i ]/L*boxdim);
idy = (int)(pos[3*i+1]/L*boxdim);
idz = (int)(pos[3*i+2]/L*boxdim);
// add to beginning of implied linked list
bp = &b[idx][idy][idz];
// next[i] = bp->head; // next = previous (my notation)
// #pragma omp critical
// {
next[i] = bp->head; // next = previous (my notation)
bp->head = i; // head = latest (my notation)
// }
}
t_assign_to_cells += time_in_seconds()-t0;
t0 = time_in_seconds();
#pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f, idx, idy, idz, ib2, bp) shared(b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, box2) num_threads(NTHREADS)
for (index=0; index<boxdim*box2; index++){
idz = index%(boxdim);
ib2 = index%(box2);
idx = (index-ib2)/(box2);
idy = (ib2-idz)/boxdim;
bp = &b[idx][idy][idz];
// interactions within and other boxes
#pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f) shared(bp, b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, idx, idy, idz)// num_threads(NTHREADS)
for (j=0; j<NUM_BOX_NEIGHBORS; j++)
{
neigh_idx = (idx + box_neighbors[j][0] + boxdim) % boxdim;
neigh_idy = (idy + box_neighbors[j][1] + boxdim) % boxdim;
neigh_idz = (idz + box_neighbors[j][2] + boxdim) % boxdim;
neigh_bp = &b[neigh_idx][neigh_idy][neigh_idz];
// when using boxes, the minimum image computation is
// known beforehand, thus we can compute position offsets
// to compensate for wraparound when computing distances
double xoffset = 0.;
double yoffset = 0.;
double zoffset = 0.;
if (idx + box_neighbors[j][0] == -1) xoffset = -L;
if (idy + box_neighbors[j][1] == -1) yoffset = -L;
if (idz + box_neighbors[j][2] == -1) zoffset = -L;
if (idx + box_neighbors[j][0] == boxdim) xoffset = L;
if (idy + box_neighbors[j][1] == boxdim) yoffset = L;
if (idz + box_neighbors[j][2] == boxdim) zoffset = L;
// NOTE: modifying the function to update the forces
p1 = neigh_bp->head;
while (p1 != -1)
{
p2 = bp->head;
while (p2 != -1)
{
// compute distance vector
dx = pos[3*p1+0] - pos[3*p2+0] + xoffset;
dy = pos[3*p1+1] - pos[3*p2+1] + yoffset;
dz = pos[3*p1+2] - pos[3*p2+2] + zoffset;
d2 = dx*dx+dy*dy+dz*dz+my_EPS;
if ( d2<4.0*a_sq)
{
s = sqrt(d2);
f = krepul*(2*a-s);
#pragma omp atomic
forces[3*p1+0] += f*dx/s;
#pragma omp atomic
forces[3*p1+1] += f*dy/s;
#pragma omp atomic
forces[3*p1+2] += f*dz/s;
#pragma omp atomic
forces[3*p2+0] -= f*dx/s;
#pragma omp atomic
forces[3*p2+1] -= f*dy/s;
#pragma omp atomic
forces[3*p2+2] -= f*dz/s;
}
p2 = next[p2];
}
p1 = next[p1];
}
}
}
t_force += time_in_seconds() - t0;
t0 = time_in_seconds();
// printf("Calculating the Hydrodynamic Interations for the given particle positions\n");
// au = upper triangular matrix with hydrodynamic interaction values
// pos = wrapped up position inside the box_width = L;
// rad = radius of particles; xi, nr, nk are constants.
for (int p1=0; p1<3*npos*3*npos; p1++){
au[p1] = 0;
}
rpy_ewald(npos, au, pos, L, rad, xi, nr, nk);
t_hd += time_in_seconds() - t0;
// print_matrix(au, 3*npos);
// printf("Getting the cholesky decomposition\n");
t0 = time_in_seconds();
// LAPACKE_dpotrf(LAPACK_ROW_MAJOR, 'U', 3*npos, au, 3*npos);
// double* au_c = cho2(au, 3*npos);
au = cho2(au, 3*npos);
/*
double m1[] = {25, 15, -5,
15, 18, 0,
-5, 0, 11};
double *c1 = cho2(m1, 3);
show_matrix(c1, 3);
my_transpose(c1, 3);
show_matrix(c1, 3);
*/
t_cho += time_in_seconds() - t0;
// Get interations vector by multiplying l_cols by buf)
// print_matrix(au_c, 3*npos);
// print_matrix(au, 3*npos);
// printf("Multiplying by random gaussian vector \n");
t0 = time_in_seconds();
// generate random values from standard normal distribution
// note: this MKL function is sequential but vectorized
vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, stream, 3*npos, buf, 0., 1.);
cblas_dgemm(CblasRowMajor, CblasTrans, CblasTrans, 3*npos, 1, 3*npos, 1, au, 3*npos, buf, 3*npos, 0, hd_vec, 1);
// print_array(buf, 3*npos);
// printf("printing the correlation vector\n");
// print_array(hd_vec, 3*npos);
// update positions with Brownian displacements
#pragma omp parallel for schedule(static) shared(pos_orig) private(i) num_threads(NTHREADS)
for (int i=0; i<3*npos; i++)
{
// pos_orig[i] += forces[i]*DELTAT+f_const*buf[i];
pos_orig[i] += forces[i]*DELTAT+f_const*hd_vec[i];
}
t_update_pos += time_in_seconds() - t0;
}
printf("--------------------------------------------------------\n");
printf("Time: %f for initiating the cell head \n", t_init_cells);
printf("Time: %f for assigning particles to cells \n", t_assign_to_cells);
printf("Time: %f for force calculations \n", t_force);
printf("Time: %f for hydrodynamic \n", t_hd);
printf("Time: %f for cholesky \n", t_cho);
printf("Time: %f for pos update \n", t_update_pos);
printf("--------------------------------------------------------\n");
return 0;
}
|
#ifndef YOLOV2
#define YOLOV2
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <limits.h>
#include <math.h>
#include <fcntl.h>
#include <iostream>
#define FLT_MAX 3.402823466e+38F /* max value */
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include "cnn.h"
// Per-neuron nonlinearities selectable by name in the network config.
typedef enum{
    LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
} ACTIVATION;
// Every layer kind a darknet-style config file may declare. Only a subset
// is implemented in this header (CONVOLUTIONAL, SHORTCUT, BATCHNORM, ...);
// the rest exist so config parsing can recognize the keywords.
typedef enum {
    CONVOLUTIONAL,
    DECONVOLUTIONAL,
    CONNECTED,
    MAXPOOL,
    SOFTMAX,
    DETECTION,
    DROPOUT,
    CROP,
    ROUTE,
    COST,
    NORMALIZATION,
    AVGPOOL,
    LOCAL,
    SHORTCUT,
    ACTIVE,
    RNN,
    GRU,
    LSTM,
    CRNN,
    BATCHNORM,
    NETWORK,
    XNOR,
    REGION,
    YOLO,
    REORG,
    UPSAMPLE,
    LOGXENT,
    L2NORM,
    BLANK
} LAYER_TYPE;
// Forward declarations: layer and network reference each other
// (layer::forward takes a network; network owns an array of layers).
struct network;
typedef struct network network;
struct layer;
typedef struct layer layer;
// One network layer. This is a darknet-style "union of all layer kinds":
// every layer type shares this struct and uses only the fields relevant to
// it; unused fields stay zeroed (constructors memset the whole struct).
// NOTE(review): field order determines memory layout — do not reorder.
struct layer{
    LAYER_TYPE type;            // which kind of layer this is
    ACTIVATION activation;      // nonlinearity applied to the output
    void (*forward) (struct layer, struct network);  // inference entry point
    int batch_normalize;        // nonzero -> forward runs batchnorm instead of plain bias
    int shortcut;
    int batch;                  // batch size this layer was built for
    int forced;
    int flipped;
    int inputs;                 // total input elements per batch item (w*h*c)
    int outputs;                // total output elements per batch item
    int nweights;               // element count of `weights`
    int nbiases;                // element count of `biases`
    int extra;
    int truths;
    // Input and output spatial dimensions.
    int h,w,c;
    int out_h, out_w, out_c;
    int n;                      // number of filters (convolutional)
    int max_boxes;
    int groups;                 // grouped-convolution group count
    int size;                   // kernel size
    int side;
    int stride;
    int reverse;
    int flatten;
    int spatial;
    int pad;                    // zero-padding on each border
    int sqrt;
    int flip;
    int index;                  // for SHORTCUT: index of the layer to add in
    int binary;
    int xnor;
    int steps;
    int hidden;
    int truth;
    float smooth;
    float dot;
    float angle;
    float jitter;
    float saturation;
    float exposure;
    float shift;
    float ratio;
    float learning_rate_scale;
    float clip;
    int softmax;
    int classes;
    int coords;
    int background;
    int rescore;
    int objectness;
    int joint;
    int noadjust;
    int reorg;
    int log;
    int tanh;
    int *mask;
    int total;
    // Loss weighting / detection hyper-parameters (region/yolo layers).
    float alpha;
    float beta;
    float kappa;
    float coord_scale;
    float object_scale;
    float noobject_scale;
    float mask_scale;
    float class_scale;
    int bias_match;
    int random;
    float ignore_thresh;
    float truth_thresh;
    float thresh;
    float focus;
    int classfix;
    int absolute;
    int onlyforward;
    int stopbackward;
    // int dontload;
    int dontsave;
    // int dontloadscales;
    float temperature;
    float probability;
    float scale;
    // Heap buffers owned by the layer (released by free_layer).
    char  * cweights;
    int   * indexes;
    int   * input_layers;
    int   * input_sizes;
    int   * map;
    float * rand;
    float * cost;
    float * state;
    float * prev_state;
    float * forgot_state;
    float * forgot_delta;
    float * state_delta;
    float * combine_cpu;
    float * combine_delta_cpu;
    float * concat;
    float * concat_delta;
    float * binary_weights;
    float * biases;             // per-filter bias, length nbiases
    float * bias_updates;
    float * scales;             // batchnorm scale (gamma)
    float * scale_updates;
    float * weights;            // kernel weights, length nweights
    float * weight_updates;
    float * delta;              // gradient wrt output (training)
    float * output;             // forward-pass result, batch*outputs
    float * loss;
    float * squared;
    float * norms;
    float * spatial_mean;
    // Batchnorm statistics: batch stats and running (inference) stats.
    float * mean;
    float * variance;
    float * mean_delta;
    float * variance_delta;
    float * rolling_mean;
    float * rolling_variance;
    float * x;
    float * x_norm;
    // Adam optimizer moment buffers.
    float * m;
    float * v;
    float * bias_m;
    float * bias_v;
    float * scale_m;
    float * scale_v;
    // Recurrent (GRU/LSTM) scratch buffers.
    float *z_cpu;
    float *r_cpu;
    float *h_cpu;
    float * prev_state_cpu;
    float *temp_cpu;
    float *temp2_cpu;
    float *temp3_cpu;
    float *dh_cpu;
    float *hh_cpu;
    float *prev_cell_cpu;
    float *cell_cpu;
    float *f_cpu;
    float *i_cpu;
    float *g_cpu;
    float *o_cpu;
    float *c_cpu;
    float *dc_cpu;
    float * binary_input;
    // Sub-layers used by composite recurrent layers (RNN/GRU/LSTM/CRNN).
    struct layer *input_layer;
    struct layer *self_layer;
    struct layer *output_layer;
    struct layer *reset_layer;
    struct layer *update_layer;
    struct layer *state_layer;
    struct layer *input_gate_layer;
    struct layer *state_gate_layer;
    struct layer *input_save_layer;
    struct layer *state_save_layer;
    struct layer *input_state_layer;
    struct layer *state_state_layer;
    struct layer *input_z_layer;
    struct layer *state_z_layer;
    struct layer *input_r_layer;
    struct layer *state_r_layer;
    struct layer *input_h_layer;
    struct layer *state_h_layer;
    // LSTM/GRU gate weight sub-layers.
    struct layer *wz;
    struct layer *uz;
    struct layer *wr;
    struct layer *ur;
    struct layer *wh;
    struct layer *uh;
    struct layer *uo;
    struct layer *wo;
    struct layer *uf;
    struct layer *wf;
    struct layer *ui;
    struct layer *wi;
    struct layer *ug;
    struct layer *wg;
    //tree *softmax_tree;
    size_t workspace_size;      // bytes of shared im2col scratch this layer needs
};
// Releases every heap buffer owned by a layer value.
// free(NULL) is defined as a no-op by the C standard, so no per-pointer
// null checks are required; pointers that were never allocated are zeroed
// by the layer constructors (memset) and pass through free harmlessly.
void free_layer(layer l)
{
    free(l.cweights);
    free(l.indexes);
    free(l.input_layers);
    free(l.input_sizes);
    free(l.map);
    free(l.rand);
    free(l.cost);
    free(l.state);
    free(l.prev_state);
    free(l.forgot_state);
    free(l.forgot_delta);
    free(l.state_delta);
    free(l.concat);
    free(l.concat_delta);
    free(l.binary_weights);
    free(l.biases);
    free(l.bias_updates);
    free(l.scales);
    free(l.scale_updates);
    free(l.weights);
    free(l.weight_updates);
    free(l.delta);
    free(l.output);
    free(l.squared);
    free(l.norms);
    free(l.spatial_mean);
    free(l.mean);
    free(l.variance);
    free(l.mean_delta);
    free(l.variance_delta);
    free(l.rolling_mean);
    free(l.rolling_variance);
    free(l.x);
    free(l.x_norm);
    free(l.m);
    free(l.v);
    free(l.z_cpu);
    free(l.r_cpu);
    free(l.h_cpu);
    free(l.binary_input);
}
//void free_layer(layer);
// Learning-rate schedule selected by the training config.
typedef enum {
    CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
} learning_rate_policy;
// A whole network: the layer array plus training hyper-parameters and the
// buffers shared by all layers during a forward pass.
typedef struct network{
    int n;                      // number of layers
    int batch;                  // batch size
    size_t *seen;               // images seen so far (training progress)
    int *t;
    float epoch;
    int subdivisions;
    layer *layers;              // array of n layers
    float *output;              // output of the final layer
    learning_rate_policy policy;
    // Optimizer hyper-parameters.
    float learning_rate;
    float momentum;
    float decay;
    float gamma;
    float scale;
    float power;
    int time_steps;
    int step;
    int max_batches;
    float *scales;
    int *steps;
    int num_steps;
    int burn_in;
    // Adam optimizer switches.
    int adam;
    float B1;
    float B2;
    float eps;
    int inputs;
    int outputs;
    int truths;
    int notruth;
    int h, w, c;                // expected input dimensions
    int max_crop;
    int min_crop;
    float max_ratio;
    float min_ratio;
    int center;
    // Data-augmentation ranges.
    float angle;
    float aspect;
    float exposure;
    float saturation;
    float hue;
    int random;
    int gpu_index;
    // tree *hierarchy;
    // Per-forward-pass state: current input, ground truth, gradients, and
    // the im2col scratch buffer shared by all layers.
    float *input;
    float *truth;
    float *delta;
    float *workspace;
    int train;                  // nonzero while training
    int index;                  // index of the layer currently executing
    float *cost;
    float clip;
} network;
network *make_network(int n);
layer get_network_output_layer(network *net);
// Parameters describing one random augmentation of an image.
typedef struct {
    int w;
    int h;
    float scale;
    float rad;
    float dx;
    float dy;
    float aspect;
} augment_args;
// An image in planar CHW float layout: data[k*h*w + y*w + x], values
// normalized to [0,1] by the loaders.
typedef struct {
    int w;
    int h;
    int c;
    float *data;
} image;
// Axis-aligned bounding box (x,y is the center; w,h the extents).
typedef struct{
    float x, y, w, h;
} box;
// One decoded detection: a box plus per-class probabilities.
typedef struct detection{
    box bbox;
    int classes;        // length of `prob`
    float *prob;        // per-class confidence
    float *mask;
    float objectness;
    int sort_class;
} detection;
// Simple row-pointer matrix of floats.
typedef struct matrix{
    int rows, cols;
    float **vals;
} matrix;
// A loaded dataset: inputs X, targets y, and optional per-image boxes.
typedef struct{
    int w, h;
    matrix X;
    matrix y;
    int shallow;        // nonzero -> rows are borrowed, don't free them
    int *num_boxes;
    box **boxes;
} data;
// What kind of dataset a loader thread should produce.
typedef enum {
    CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA
} data_type;
// Arguments handed to a data-loading worker thread.
typedef struct load_args{
    int threads;
    char **paths;
    char *path;
    int n;              // images per batch to load
    int m;              // total images available
    char **labels;
    int h;
    int w;
    int out_w;
    int out_h;
    int nh;
    int nw;
    int num_boxes;
    int min, max, size;
    int classes;
    int background;
    int scale;
    int center;
    int coords;
    // Augmentation ranges forwarded to the loader.
    float jitter;
    float angle;
    float aspect;
    float saturation;
    float exposure;
    float hue;
    data *d;            // output: where the loaded batch is stored
    image *im;
    image *resized;
    data_type type;
    // tree *hierarchy;
} load_args;
// A ground-truth box as read from a label file.
typedef struct{
    int id;             // class id
    float x,y,w,h;
    float left, right, top, bottom;
} box_label;
//network *load_network(char *cfg, char *weights, int clear);
//load_args get_base_args(network *net);
//void free_data(data d);
// Key/value pair used by the config parser; `used` tracks whether the
// option was ever queried (see option_unused).
typedef struct{
    char *key;
    char *val;
    int used;
} kvp;
// Doubly-linked list node holding an untyped payload.
typedef struct node{
    void *val;
    struct node *next;
    struct node *prev;
} node;
// Doubly-linked list with O(1) insert-at-back and pop-from-back.
typedef struct list{
    int size;
    node *front;
    node *back;
} list;
// Reports a fatal error via perror and aborts the process.
// NOTE(review): assert(0) aborts in debug builds; with NDEBUG defined the
// assert is a no-op and exit(-1) performs the termination instead.
void error(const char *s)
{
    perror(s);
    assert(0);
    exit(-1);
}
// Fatal out-of-memory handler: prints a message and terminates.
void malloc_error()
{
    fprintf(stderr, "Malloc error\n");
    exit(-1);
}
// Fatal file-open failure: prints the offending path and terminates.
void file_error(char *s)
{
    fprintf(stderr, "Couldn't open file: %s\n", s);
    exit(0);
}
/////////////////list begin
// Allocates and returns a new empty list.
list *make_list()
{
    list *created = (list *)malloc(sizeof(list));
    created->size = 0;
    created->front = 0;
    created->back = 0;
    return created;
}
// Removes and returns the payload of the last node, or 0 if the list is
// empty. Fix: when the final node is popped, `front` must also be cleared —
// the original left it pointing at the freed node (dangling pointer that
// free_node/free_list would later free again).
void *list_pop(list *l){
    if(!l->back) return 0;
    node *b = l->back;
    void *val = b->val;
    l->back = b->prev;
    if(l->back) l->back->next = 0;
    else l->front = 0;   // list is now empty: don't leave front dangling
    free(b);
    --l->size;
    return val;
}
// Appends `val` at the back of the list in O(1).
void list_insert(list *l, void *val)
{
    node *added = (node *)malloc(sizeof(node));
    added->val = val;
    added->next = 0;
    added->prev = l->back;   // 0 when the list was empty
    if(l->back){
        l->back->next = added;
    }else{
        l->front = added;    // first element becomes front too
    }
    l->back = added;
    ++l->size;
}
// Frees a chain of nodes starting at n (payloads are NOT freed).
void free_node(node *n)
{
    node *next;
    while(n) {
        next = n->next;
        free(n);
        n = next;
    }
}
// Frees the list skeleton: all nodes plus the list struct itself.
// Payloads are untouched; call free_list_contents first if they are owned.
void free_list(list *l)
{
    free_node(l->front);
    free(l);
}
// Frees every payload stored in the list (the nodes themselves remain).
void free_list_contents(list *l)
{
    node *n = l->front;
    while(n){
        free(n->val);
        n = n->next;
    }
}
// Copies the list's payload pointers, front to back, into a freshly
// allocated array of l->size entries. Caller frees the array (payloads
// are shared with the list, not copied).
void **list_to_array(list *l)
{
    void **arr = (void **)calloc(l->size, sizeof(void*));
    int idx = 0;
    for(node *cur = l->front; cur; cur = cur->next){
        arr[idx++] = cur->val;
    }
    return arr;
}
/////////////////list end
/////////////////////utils begin
void del_arg(int argc, char **argv, int index)
{
int i;
for(i = index; i < argc-1; ++i) argv[i] = argv[i+1];
argv[i] = 0;
}
// Returns 1 and removes the flag from argv if `arg` is present, else 0.
// Slots already cleared by del_arg (NULL) are skipped.
int find_arg(int argc, char* argv[], char *arg)
{
    for(int i = 0; i < argc; ++i) {
        if(!argv[i]) continue;
        if(strcmp(argv[i], arg) == 0) {
            del_arg(argc, argv, i);
            return 1;
        }
    }
    return 0;
}
// Looks for "`arg` <value>" in argv; returns atoi(value) if found (and
// removes both tokens from argv), otherwise returns `def`.
int find_int_arg(int argc, char **argv, char *arg, int def)
{
    for(int i = 0; i + 1 < argc; ++i){
        if(!argv[i] || strcmp(argv[i], arg) != 0) continue;
        def = atoi(argv[i+1]);
        del_arg(argc, argv, i);   // drop the flag (value shifts into slot i)
        del_arg(argc, argv, i);   // drop the value
        break;
    }
    return def;
}
// Looks for "`arg` <value>" in argv; returns atof(value) if found (and
// removes both tokens from argv), otherwise returns `def`.
float find_float_arg(int argc, char **argv, char *arg, float def)
{
    for(int i = 0; i + 1 < argc; ++i){
        if(!argv[i] || strcmp(argv[i], arg) != 0) continue;
        def = atof(argv[i+1]);
        del_arg(argc, argv, i);   // drop the flag (value shifts into slot i)
        del_arg(argc, argv, i);   // drop the value
        break;
    }
    return def;
}
// Looks for "`arg` <value>" in argv; returns the value string if found
// (and removes both tokens from argv), otherwise returns `def`.
char *find_char_arg(int argc, char **argv, char *arg, char *def)
{
    for(int i = 0; i + 1 < argc; ++i){
        if(!argv[i] || strcmp(argv[i], arg) != 0) continue;
        def = argv[i+1];
        del_arg(argc, argv, i);   // drop the flag (value shifts into slot i)
        del_arg(argc, argv, i);   // drop the value
        break;
    }
    return def;
}
// Reads an entire file into a NUL-terminated heap buffer.
// Fixes: the original never checked fopen (NULL deref on a missing file)
// and ignored the fread result.
unsigned char *read_file(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    if(!fp){
        fprintf(stderr, "Couldn't open file: %s\n", filename);
        exit(0);
    }
    fseek(fp, 0, SEEK_END);
    size_t size = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    // +1 so the buffer is always NUL-terminated (calloc zero-fills).
    unsigned char *text = (unsigned char *)calloc(size+1, sizeof(unsigned char));
    if(fread(text, 1, size, fp) != size){
        fprintf(stderr, "Couldn't read file: %s\n", filename);
    }
    fclose(fp);
    return text;
}
// Splits `s` in place on `delim`: each delimiter byte is overwritten with
// '\0' and the list collects pointers into the (mutated) original buffer.
list *split_str(char *s, char delim)
{
    size_t n = strlen(s);
    list *parts = make_list();
    list_insert(parts, s);          // first piece starts at s itself
    for(size_t pos = 0; pos < n; ++pos){
        if(s[pos] != delim) continue;
        s[pos] = '\0';
        list_insert(parts, &(s[pos+1]));
    }
    return parts;
}
// Removes all spaces, tabs and newlines from `s` in place (compacting the
// remaining characters to the left).
void strip(char *s)
{
    size_t len = strlen(s);
    size_t dst = 0;
    for(size_t src = 0; src < len; ++src){
        char c = s[src];
        if(c != ' ' && c != '\t' && c != '\n'){
            s[dst++] = c;
        }
    }
    s[dst] = '\0';
}
// Removes every occurrence of `bad` from `s` in place.
void strip_char(char *s, char bad)
{
    size_t len = strlen(s);
    size_t dst = 0;
    for(size_t src = 0; src < len; ++src){
        char c = s[src];
        if(c != bad){
            s[dst++] = c;
        }
    }
    s[dst] = '\0';
}
// Frees n heap pointers and then the pointer array itself.
void free_ptrs(void **ptrs, int n)
{
    for(int i = 0; i < n; ++i){
        free(ptrs[i]);
    }
    free(ptrs);
}
// Reads one line from fp into a growing heap buffer and strips the
// trailing newline. Returns 0 at end of file; caller frees the result.
// Fixes vs original: guard `curr > 0` before indexing line[curr-1]
// (UB when fgets yields an empty string); keep the old pointer across a
// failed realloc; print size_t with %zu instead of %ld.
char *fgetl(FILE *fp)
{
    if(feof(fp)) return 0;
    size_t size = 512;
    char *line = (char *)malloc(size*sizeof(char));
    if(!fgets(line, size, fp)){
        free(line);
        return 0;
    }
    size_t curr = strlen(line);
    while(curr > 0 && (line[curr-1] != '\n') && !feof(fp)){
        if(curr == size-1){
            size *= 2;
            char *grown = (char *)realloc(line, size*sizeof(char));
            if(!grown) {
                // Same fatal path as malloc_error(), inlined.
                printf("%zu\n", size);
                fprintf(stderr, "Malloc error\n");
                exit(-1);
            }
            line = grown;
        }
        size_t readsize = size-curr;
        if(readsize > INT_MAX) readsize = INT_MAX-1;  // fgets takes an int
        fgets(&line[curr], readsize, fp);
        curr = strlen(line);
    }
    if(curr > 0 && line[curr-1] == '\n') line[curr-1] = '\0';
    return line;
}
/////////////////////utils end
////////////////////option_list begin
// Appends a key/value option to the list, initially marked unused.
// Key and value strings are borrowed, not copied.
void option_insert(list *l, char *key, char *val)
{
    kvp *entry = (kvp *)malloc(sizeof(kvp));
    entry->key = key;
    entry->val = val;
    entry->used = 0;
    list_insert(l, entry);
}
// Parses one "key=value" config line in place ('=' is replaced by '\0')
// and inserts it into `options`. Returns 0 for an invalid line.
// Fixes vs original: a line with no '=' used to insert a kvp with a NULL
// value (later printed with %s — UB); an empty line made `len-1` underflow.
// Lines ending in '=' are still rejected, as before.
int read_option(char *s, list *options)
{
    size_t i;
    size_t len = strlen(s);
    char *val = 0;
    for(i = 0; i < len; ++i){
        if(s[i] == '='){
            s[i] = '\0';
            val = s+i+1;
            break;
        }
    }
    // val == 0  -> no '=' anywhere on the line (also covers len == 0)
    // i == len-1 -> '=' was the last character, so the value is empty
    if(val == 0 || i == len-1) return 0;
    char *key = s;
    option_insert(options, key, val);
    return 1;
}
// Warns on stderr about every option that was parsed but never queried
// (its `used` flag was never set by option_find).
void option_unused(list *l)
{
    for(node *cur = l->front; cur; cur = cur->next){
        kvp *entry = (kvp *)cur->val;
        if(!entry->used){
            fprintf(stderr, "Unused field: '%s = %s'\n", entry->key, entry->val);
        }
    }
}
// Linear search for `key`; marks the entry used and returns its value,
// or 0 when the key is absent.
char *option_find(list *l, char *key)
{
    for(node *cur = l->front; cur; cur = cur->next){
        kvp *entry = (kvp *)cur->val;
        if(0 == strcmp(entry->key, key)){
            entry->used = 1;
            return entry->val;
        }
    }
    return 0;
}
// Typed lookups over an option list. The *_quiet variants return the
// default silently; the others log when the default is used.
char *option_find_str(list *l, char *key, char *def)
{
    char *v = option_find(l, key);
    if(v) return v;
    if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def);
    return def;
}
int option_find_int(list *l, char *key, int def)
{
    char *v = option_find(l, key);
    if(v) return atoi(v);
    fprintf(stderr, "%s: Using default '%d'\n", key, def);
    return def;
}
int option_find_int_quiet(list *l, char *key, int def)
{
    char *v = option_find(l, key);
    if(v) return atoi(v);
    return def;
}
float option_find_float_quiet(list *l, char *key, float def)
{
    char *v = option_find(l, key);
    if(v) return atof(v);
    return def;
}
float option_find_float(list *l, char *key, float def)
{
    char *v = option_find(l, key);
    if(v) return atof(v);
    // %lf is valid for a float here: varargs promote float to double.
    fprintf(stderr, "%s: Using default '%lf'\n", key, def);
    return def;
}
// Parses a config file into a list of kvp options, one "key=value" per
// line. Blank lines and lines starting with '#' or ';' are comments.
// Lines that parse successfully keep ownership of their heap buffer
// (kvp key/val point into it); rejected lines are freed immediately.
list *read_data_cfg(char *filename)
{
    FILE *file = fopen(filename, "r");
    if(file == 0) file_error(filename);
    char *line;
    int nu = 0;     // line counter, for error messages only
    list *options = make_list();
    while((line=fgetl(file)) != 0){
        ++ nu;
        strip(line);
        switch(line[0]){
            case '\0':
            case '#':
            case ';':
                free(line);
                break;
            default:
                if(!read_option(line, options)){
                    fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
                    free(line);
                }
                break;
        }
    }
    fclose(file);
    return options;
}
///////////////////option_list end
// Builds an image header with the given dimensions and no pixel buffer.
image make_empty_image(int w, int h, int c)
{
    image img;
    img.w = w;
    img.h = h;
    img.c = c;
    img.data = 0;
    return img;
}
// Reads `filename` line by line and returns the lines (heap strings from
// fgetl, owned by the list) as a list. Terminates the process if the file
// cannot be opened.
list *get_paths(char *filename)
{
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    for(char *path = fgetl(file); path; path = fgetl(file)){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}
// Loads class-name labels, one per line, into a heap array of strings.
// NOTE(review): the element count is not returned — callers must know the
// number of classes from elsewhere (e.g. the network config).
char **get_labels(char *filename)
{
    list *plist = get_paths(filename);
    char **labels = (char **)list_to_array(plist);
    // Frees only the list nodes; the label strings stay owned by `labels`.
    free_list(plist);
    return labels;
}
// Allocates a zero-initialized w*h*c float image in planar CHW layout.
image make_image(int w, int h, int c)
{
    image img = make_empty_image(w,h,c);
    img.data = (float *)calloc(h*w*c, sizeof(float));
    return img;
}
// Reads pixel (x,y) of channel c. Bounds are checked only by assert;
// callers must stay in range.
static float get_pixel(image m, int x, int y, int c)
{
    assert(x < m.w && y < m.h && c < m.c);
    return m.data[c*m.h*m.w + y*m.w + x];
}
// Writes pixel (x,y) of channel c; silently ignores out-of-range
// coordinates (used by embed_image when the source overhangs the dest).
static void set_pixel(image m, int x, int y, int c, float val)
{
    if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return;
    assert(x < m.w && y < m.h && c < m.c);
    m.data[c*m.h*m.w + y*m.w + x] = val;
}
// Accumulates into pixel (x,y) of channel c (no range guard beyond assert).
static void add_pixel(image m, int x, int y, int c, float val)
{
    assert(x < m.w && y < m.h && c < m.c);
    m.data[c*m.h*m.w + y*m.w + x] += val;
}
// Releases an image's pixel buffer (header is passed by value, not freed).
void free_image(image m)
{
    if(m.data){
        free(m.data);
    }
}
// Bilinear resize in two separable passes: first horizontally into `part`
// (w x im.h), then vertically into `resized` (w x h). Returns a new image;
// caller frees it.
image resize_image(image im, int w, int h)
{
    image resized = make_image(w, h, im.c);
    image part = make_image(w, im.h, im.c);
    int r, c, k;
    // Map destination index to source coordinate; (n-1)/(m-1) keeps the
    // first and last pixels aligned.
    float w_scale = (float)(im.w - 1) / (w - 1);
    float h_scale = (float)(im.h - 1) / (h - 1);
    // Pass 1: horizontal interpolation at the original height.
    for(k = 0; k < im.c; ++k){
        for(r = 0; r < im.h; ++r){
            for(c = 0; c < w; ++c){
                float val = 0;
                if(c == w-1 || im.w == 1){
                    // Last column (or degenerate 1-wide source): no right
                    // neighbor to blend with, copy the edge pixel.
                    val = get_pixel(im, im.w-1, r, k);
                } else {
                    float sx = c*w_scale;
                    int ix = (int) sx;
                    float dx = sx - ix;
                    val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k);
                }
                set_pixel(part, c, r, k, val);
            }
        }
    }
    // Pass 2: vertical interpolation, accumulated in two steps so each
    // source row of `part` is read sequentially.
    for(k = 0; k < im.c; ++k){
        for(r = 0; r < h; ++r){
            float sy = r*h_scale;
            int iy = (int) sy;
            float dy = sy - iy;
            for(c = 0; c < w; ++c){
                float val = (1-dy) * get_pixel(part, c, iy, k);
                set_pixel(resized, c, r, k, val);
            }
            // Skip the second tap on the last row / 1-tall source.
            if(r == h-1 || im.h == 1) continue;
            for(c = 0; c < w; ++c){
                float val = dy * get_pixel(part, c, iy+1, k);
                add_pixel(resized, c, r, k, val);
            }
        }
    }
    free_image(part);
    return resized;
}
// Sets every pixel of every channel to the constant s.
void fill_image(image m, float s)
{
    int total = m.h*m.w*m.c;
    for(int i = 0; i < total; ++i){
        m.data[i] = s;
    }
}
// Pastes `source` into `dest` with its top-left corner at (dx,dy).
// set_pixel ignores out-of-range targets, so overhang is clipped safely.
void embed_image(image source, image dest, int dx, int dy)
{
    for(int k = 0; k < source.c; ++k){
        for(int y = 0; y < source.h; ++y){
            for(int x = 0; x < source.w; ++x){
                set_pixel(dest, dx+x, dy+y, k, get_pixel(source, x, y, k));
            }
        }
    }
}
// Resizes `im` to fit inside w x h while preserving aspect ratio, then
// centers it on a gray (0.5) canvas of exactly w x h. Returns a new image.
image letterbox_image(image im, int w, int h)
{
    int new_w = im.w;
    int new_h = im.h;
    // Scale by the tighter of the two ratios so the result fits both ways.
    if (((float)w/im.w) < ((float)h/im.h)) {
        new_w = w;
        new_h = (im.h * w)/im.w;
    } else {
        new_h = h;
        new_w = (im.w * h)/im.h;
    }
    image resized = resize_image(im, new_w, new_h);
    image boxed = make_image(w, h, im.c);
    fill_image(boxed, .5);
    //int i;
    //for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0;
    embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
    free_image(resized);
    return boxed;
}
// Loads an image file via stb_image, converting from stb's interleaved
// HWC bytes to this library's planar CHW floats in [0,1].
// channels == 0 keeps the file's native channel count.
image load_image_stb(char *filename, int channels)
{
    int w, h, c;
    unsigned char *data = stbi_load(filename, &w, &h, &c, channels);
    if (!data) {
        fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
        exit(0);
    }
    if(channels) c = channels;  // stb reports native c; we forced `channels`
    int i,j,k;
    image im = make_image(w, h, c);
    for(k = 0; k < c; ++k){
        for(j = 0; j < h; ++j){
            for(i = 0; i < w; ++i){
                int dst_index = i + w*j + w*h*k;      // planar CHW
                int src_index = k + c*i + c*w*j;      // interleaved HWC
                im.data[dst_index] = (float)data[src_index]/255.;
            }
        }
    }
    free(data);
    return im;
}
// Writes `im` as "<name>.png", converting planar CHW floats in [0,1] back
// to interleaved HWC bytes. Fixes vs original: snprintf bounds the filename
// buffer (sprintf could overflow the 256-byte buffer for long names) and
// the pixel-buffer allocation is checked before use.
void save_image_png(image im, const char *name)
{
    char buff[256];
    //sprintf(buff, "%s (%d)", name, windows);
    snprintf(buff, sizeof(buff), "%s.png", name);
    unsigned char *data = (unsigned char *)calloc(im.w*im.h*im.c, sizeof(char));
    if(!data){
        fprintf(stderr, "Failed to write image %s\n", buff);
        return;
    }
    int i,k;
    for(k = 0; k < im.c; ++k){
        for(i = 0; i < im.w*im.h; ++i){
            // Planar plane k, pixel i  ->  interleaved byte i*c+k.
            data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]);
        }
    }
    int success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
    free(data);
    if(!success) fprintf(stderr, "Failed to write image %s\n", buff);
}
// Loads the label-drawing glyph images ("labels/<ascii>_<size>.png") for 8
// font sizes; alphabets[size][ascii] indexes a glyph. Entries outside
// ASCII 32..126 stay zeroed. Fix: the outer calloc used sizeof(image) for
// an array of image* — over-allocating; element size is sizeof(image *).
image **load_alphabet()
{
    int i, j;
    const int nsize = 8;
    image **alphabets = (image **)calloc(nsize, sizeof(image *));
    for(j = 0; j < nsize; ++j){
        alphabets[j] = (image *)calloc(128, sizeof(image));
        for(i = 32; i < 127; ++i){
            char buff[256];
            sprintf(buff, "labels/%d_%d.png", i, j);
            //alphabets[j][i] = load_image_color(buff, 0, 0);
            alphabets[j][i] = load_image_stb(buff, 3);
        }
    }
    return alphabets;
}
///////////////////activation begin
// Scalar activation functions and their derivatives. The *_gradient
// functions take the activation OUTPUT (not the pre-activation input)
// where the derivative can be expressed that way (logistic, tanh, ...).
static inline float stair_activate(float x)
{
    int n = floor(x);
    if (n%2 == 0) return floor(x/2.);
    else return (x - n) + floor(x/2.);
}
// Clamps to [-1, 1].
static inline float hardtan_activate(float x)
{
    if (x < -1) return -1;
    if (x > 1) return 1;
    return x;
}
static inline float linear_activate(float x){return x;}
// Standard sigmoid 1/(1+e^-x).
static inline float logistic_activate(float x){return 1./(1. + exp(-x));}
// Sigmoid rescaled to (-1, 1).
static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;}
static inline float relu_activate(float x){return x*(x>0);}
static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
static inline float relie_activate(float x){return (x>0) ? x : .01*x;}
static inline float ramp_activate(float x){return x*(x>0)+.1*x;}
// Leaky ReLU with slope 0.1 on the negative side (YOLO default).
static inline float leaky_activate(float x){return (x>0) ? x : .1*x;}
static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
// Piecewise-linear sigmoid approximation.
static inline float plse_activate(float x)
{
    if(x < -4) return .01 * (x + 4);
    if(x > 4) return .01 * (x - 4) + 1;
    return .125*x + .5;
}
static inline float lhtan_activate(float x)
{
    if(x < 0) return .001*x;
    if(x > 1) return .001*(x-1) + 1;
    return x;
}
static inline float lhtan_gradient(float x)
{
    if(x > 0 && x < 1) return 1;
    return .001;
}
static inline float hardtan_gradient(float x)
{
    if (x > -1 && x < 1) return 1;
    return 0;
}
static inline float linear_gradient(float x){return 1;}
// Derivative in terms of the sigmoid output y: y*(1-y).
static inline float logistic_gradient(float x){return (1-x)*x;}
static inline float loggy_gradient(float x)
{
    float y = (x+1.)/2.;
    return 2*(1-y)*y;
}
static inline float stair_gradient(float x)
{
    if (floor(x) == x) return 0;
    return 1;
}
static inline float relu_gradient(float x){return (x>0);}
static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);}
static inline float relie_gradient(float x){return (x>0) ? 1 : .01;}
static inline float ramp_gradient(float x){return (x>0)+.1;}
static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;}
// Derivative in terms of the tanh output y: 1 - y^2.
static inline float tanh_gradient(float x){return 1-x*x;}
static inline float plse_gradient(float x){return (x < 0 || x > 1) ? .01 : .125;}
// Maps an ACTIVATION enum value to its config-file keyword
// (defaults to "relu" for unknown values).
char *get_activation_string(ACTIVATION a)
{
    switch(a){
        case LOGISTIC:
            return "logistic";
        case LOGGY:
            return "loggy";
        case RELU:
            return "relu";
        case ELU:
            return "elu";
        case RELIE:
            return "relie";
        case RAMP:
            return "ramp";
        case LINEAR:
            return "linear";
        case TANH:
            return "tanh";
        case PLSE:
            return "plse";
        case LEAKY:
            return "leaky";
        case STAIR:
            return "stair";
        case HARDTAN:
            return "hardtan";
        case LHTAN:
            return "lhtan";
        default:
            break;
    }
    return "relu";
}
// Inverse mapping: config keyword -> ACTIVATION enum, warning and falling
// back to RELU when the name is unrecognized.
ACTIVATION get_activation(char *s)
{
    if (strcmp(s, "logistic")==0) return LOGISTIC;
    if (strcmp(s, "loggy")==0) return LOGGY;
    if (strcmp(s, "relu")==0) return RELU;
    if (strcmp(s, "elu")==0) return ELU;
    if (strcmp(s, "relie")==0) return RELIE;
    if (strcmp(s, "plse")==0) return PLSE;
    if (strcmp(s, "hardtan")==0) return HARDTAN;
    if (strcmp(s, "lhtan")==0) return LHTAN;
    if (strcmp(s, "linear")==0) return LINEAR;
    if (strcmp(s, "ramp")==0) return RAMP;
    if (strcmp(s, "leaky")==0) return LEAKY;
    if (strcmp(s, "tanh")==0) return TANH;
    if (strcmp(s, "stair")==0) return STAIR;
    fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
    return RELU;
}
// Applies the selected activation to a single value.
float activate(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_activate(x);
        case LOGISTIC:
            return logistic_activate(x);
        case LOGGY:
            return loggy_activate(x);
        case RELU:
            return relu_activate(x);
        case ELU:
            return elu_activate(x);
        case RELIE:
            return relie_activate(x);
        case RAMP:
            return ramp_activate(x);
        case LEAKY:
            return leaky_activate(x);
        case TANH:
            return tanh_activate(x);
        case PLSE:
            return plse_activate(x);
        case STAIR:
            return stair_activate(x);
        case HARDTAN:
            return hardtan_activate(x);
        case LHTAN:
            return lhtan_activate(x);
    }
    return 0;
}
// Applies the activation in place over an array of n values.
void activate_array(float *x, const int n, const ACTIVATION a)
{
    int i;
    for(i = 0; i < n; ++i){
        x[i] = activate(x[i], a);
    }
}
// Derivative of the selected activation; for output-parameterized
// derivatives (logistic, tanh, ...) pass the activation OUTPUT as x.
float gradient(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_gradient(x);
        case LOGISTIC:
            return logistic_gradient(x);
        case LOGGY:
            return loggy_gradient(x);
        case RELU:
            return relu_gradient(x);
        case ELU:
            return elu_gradient(x);
        case RELIE:
            return relie_gradient(x);
        case RAMP:
            return ramp_gradient(x);
        case LEAKY:
            return leaky_gradient(x);
        case TANH:
            return tanh_gradient(x);
        case PLSE:
            return plse_gradient(x);
        case STAIR:
            return stair_gradient(x);
        case HARDTAN:
            return hardtan_gradient(x);
        case LHTAN:
            return lhtan_gradient(x);
    }
    return 0;
}
///////////////////activation end
// BLAS-style strided copy: Y[i*INCY] = X[i*INCX] for i in [0, N).
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    for(int idx = 0; idx < N; ++idx){
        Y[idx*INCY] = X[idx*INCX];
    }
}
// BLAS-style strided fill: X[i*INCX] = ALPHA for i in [0, N).
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    for(int idx = 0; idx < N; ++idx){
        X[idx*INCX] = ALPHA;
    }
}
// Residual add between two feature maps of possibly different sizes:
// out = s1*out + s2*add, over the overlapping min(w,h,c) region.
// `stride` subsamples the larger source; `sample` spreads into the larger
// destination (one of the two is always 1 after clamping).
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
    int stride = w1/w2;
    int sample = w2/w1;
    // Width and height ratios must agree.
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    //printf("shorcut_layer batch=%d,stride=%d,sample=%d\n",batch,stride,sample);
    // Integer division gives 0 for the smaller side; clamp both to >= 1.
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;
    int i,j,k,b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < minc; ++k){
            for(j = 0; j < minh; ++j){
                for(i = 0; i < minw; ++i){
                    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[out_index] = s1*out[out_index] + s2*add[add_index];
                }
            }
        }
    }
}
// Simplified shortcut forward pass: element-wise sum of the current input
// and the output of the layer at l.index.
// NOTE(review): unlike the commented-out general path, this assumes the
// two feature maps have identical dimensions (uses l.w/h/c for both) and
// skips the activation — confirm all shortcut layers in the model satisfy
// that before reusing elsewhere.
void forward_shortcut_layer(const layer l, network net)
{
    //copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
    //shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output);
    //activate_array(l.output, l.outputs*l.batch, l.activation);
    int w = l.w;
    int h = l.h;
    int c = l.c;
    float *add = net.layers[l.index].output;  // skip-connection source
    float *out = l.output;
    float *in = net.input;
    int i,j,k;
    for(k = 0; k < c; ++k){
        for(j = 0; j < h; ++j){
            for(i = 0; i < w; ++i){
                int index = i + w*(j + h*k );
                out[index] = in[index] + add[index];
            }
        }
    }
}
// Builds a SHORTCUT layer that adds the output of layer `index`
// ((w2,h2,c2)-shaped) into a (w,h,c)-shaped output. All unused fields are
// zeroed by the memset.
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
    fprintf(stderr, "res  %3d                %4d x%4d x%4d   ->  %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c);
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = SHORTCUT;
    l.batch = batch;
    // Input shape comes from the skip source; output shape from the caller.
    l.w = w2;
    l.h = h2;
    l.c = c2;
    l.out_w = w;
    l.out_h = h;
    l.out_c = c;
    l.outputs = w*h*c;
    l.inputs = l.outputs;
    l.index = index;    // which earlier layer feeds the addition
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));;
    l.forward = forward_shortcut_layer;
    return l;
}
// Standard convolution output height: (h + 2*pad - k)/stride + 1.
int convolutional_out_height(layer l)
{
    return (l.h + 2*l.pad - l.size) / l.stride + 1;
}
// Standard convolution output width: (w + 2*pad - k)/stride + 1.
int convolutional_out_width(layer l)
{
    return (l.w + 2*l.pad - l.size) / l.stride + 1;
}
// Bytes of im2col scratch space the layer needs: one unrolled column
// matrix of out_h*out_w columns, each k*k*c/groups floats tall.
static size_t get_workspace_size(layer l){
    return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float);
}
// Adds biases[i] to every element of output channel i: output is laid out
// as [batch][n channels][size spatial elements].
void add_bias(float *output, float *biases, int batch, int n, int size)
{
    for(int b = 0; b < batch; ++b){
        for(int i = 0; i < n; ++i){
            float *channel = output + (b*n + i)*size;
            float bias = biases[i];
            for(int j = 0; j < size; ++j){
                channel[j] += bias;
            }
        }
    }
}
// Multiplies every element of output channel i by scales[i] (batchnorm
// gamma); output is laid out as [batch][n channels][size spatial].
void scale_bias(float *output, float *scales, int batch, int n, int size)
{
    for(int b = 0; b < batch; ++b){
        for(int i = 0; i < n; ++i){
            float *channel = output + (b*n + i)*size;
            float factor = scales[i];
            for(int j = 0; j < size; ++j){
                channel[j] *= factor;
            }
        }
    }
}
// Fetches pixel (row,col) of `channel` from a padded view of the image:
// coordinates land `pad` outside the real data and reads beyond the
// border return 0 (zero padding).
float im2col_get_pixel(float *im, int height, int width, int channels,
        int row, int col, int channel, int pad)
{
    int r = row - pad;
    int c = col - pad;
    if (r < 0 || r >= height || c < 0 || c >= width) return 0;
    return im[c + width*(r + height*channel)];
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Unrolls image patches into columns so convolution becomes a GEMM:
// data_col gets channels*ksize*ksize rows, each of height_col*width_col
// entries, where row c corresponds to one (channel, ky, kx) kernel tap.
void im2col_cpu(float* data_im,
        int channels,  int height,  int width,
        int ksize,  int stride, int pad, float* data_col)
{
    int c,h,w;
    int height_col = (height + 2*pad - ksize) / stride + 1;
    int width_col = (width + 2*pad - ksize) / stride + 1;
    int channels_col = channels * ksize * ksize;
    for (c = 0; c < channels_col; ++c) {
        // Decompose the row index into (kernel x, kernel y, input channel).
        int w_offset = c % ksize;
        int h_offset = (c / ksize) % ksize;
        int c_im = c / ksize / ksize;
        for (h = 0; h < height_col; ++h) {
            for (w = 0; w < width_col; ++w) {
                int im_row = h_offset + h * stride;
                int im_col = w_offset + w * stride;
                int col_index = (c * height_col + h) * width_col + w;
                // im2col_get_pixel applies the zero padding.
                data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
            }
        }
    }
}
// C += ALPHA * A * B for row-major non-transposed operands:
// A is MxK (leading dim lda), B is KxN (ldb), C is MxN (ldc).
// The i-k-j loop order streams each row of B sequentially (cache friendly).
// Fix: dropped the `register` storage class — it was removed in C++17 and
// makes this header ill-formed when compiled as modern C++; `const` conveys
// the same intent and optimizers ignore `register` anyway.
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    // #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            const float A_PART = ALPHA*A[i*lda+k];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}
// General matrix multiply C = ALPHA*op(A)*op(B) + BETA*C.
// Only the non-transposed case (TA == 0 && TB == 0) is implemented here;
// the transposed variants are stubbed out, so other flag combinations
// silently perform only the BETA scaling.
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    int i, j;
    // Pre-scale C by BETA, then accumulate the product.
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            C[i*ldc + j] *= BETA;
        }
    }
    if(!TA && !TB)
        gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    //else if(TA && !TB)
    //    gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    //else if(!TA && TB)
    //    gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    //else
    //    gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}
// Thin wrapper kept for API compatibility with the GPU build.
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu( TA,  TB,  M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
// Batchnorm normalization: for each filter f, x = (x - mean[f]) /
// (sqrt(variance[f]) + 1e-6), applied over every spatial element of every
// batch item. The small epsilon guards against division by zero.
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    for(int b = 0; b < batch; ++b){
        for(int f = 0; f < filters; ++f){
            float mu = mean[f];
            double denom = sqrt(variance[f]) + .000001f;  // double, as before
            float *row = x + (b*filters + f)*spatial;
            for(int i = 0; i < spatial; ++i){
                row[i] = (row[i] - mu)/denom;
            }
        }
    }
}
// Inference-mode batchnorm for a convolutional layer's output:
// normalize with the rolling (training-time) statistics, then apply the
// learned per-channel scale (gamma) and bias (beta).
void forward_batchnorm_layer(layer l, network net)//for conv
{
    normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.out_c, l.out_h*l.out_w);
    scale_bias(l.output, l.scales, l.batch, l.out_c, l.out_h*l.out_w);
    add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w);
}
// Direct (non-im2col) convolution with zero padding: Output[of] =
// sum over input maps and kernel taps, single image (no batch/groups).
// NOTE(review): despite the name, no ReLU is applied — the raw sum is
// written to Output; activation must happen elsewhere. Confirm before
// swapping this in for the im2col path.
void CONV_Padding_Relu(float *Input,float *Output,float *Weight,const int InFM_num,const int OutFM_num,const int Kernel_size,const int Kernel_stride,const int Input_w,const int Input_h,const int Padding)
{
    // (output_w - 1)*Kernel_stride + Kernel_size = Input_w
    const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
    const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
    int x, y, of, inf;
    int m,n;
    for( of = 0; of < OutFM_num; of++){
        for( y = 0; y < output_h; y++) {
            for( x = 0; x < output_w; x++){
                float tmp = 0.0;
                for(inf = 0;inf < InFM_num; inf++)
                {
                    // Top-left corner of the receptive field in input map `inf`
                    // (may point outside the image; bounds checked per tap below).
                    int intput_offset = inf*Input_w*Input_h + (y*Kernel_stride - Padding)*Input_w + x*Kernel_stride - Padding;
                    for(m = 0;m < Kernel_size; m++)
                    {
                        for(n = 0;n < Kernel_size; n++)
                        {
                            int kernel_offset = of*InFM_num*Kernel_size*Kernel_size + inf*Kernel_size*Kernel_size;
                            // Zero padding: skip taps that fall outside the input.
                            bool inFM_width = ((x*Kernel_stride + n - Padding) >= 0)&&((x*Kernel_stride + n - Padding) < Input_w);
                            bool inFM_height = ((y*Kernel_stride + m - Padding) >= 0)&&((y*Kernel_stride + m - Padding) < Input_h);
                            if(inFM_width&&inFM_height)
                                tmp += Weight[kernel_offset + m*Kernel_size + n]*Input[intput_offset + m*Input_w + n];
                        }
                    }
                }
                Output[of*output_w*output_h + y*output_w + x] = tmp;
            }
        }
    }
}
/* Convolution forward pass via im2col + GEMM, followed by batchnorm (or
 * bias add) and the layer's activation.
 * NOTE(review): unlike the commented-out reference loop above, this path
 * ignores l.groups and processes only a single batch image (no i/j loop)
 * — presumably the deployment target always runs batch=1, groups=1;
 * confirm against the callers. */
void forward_convolutional_layer(layer l, network net)
{
    int i, j;
    /* Zero the output buffer before accumulating into it. */
    fill_cpu(l.outputs*l.batch, 0, l.output, 1);
    //printf("c=%d,n=%d,size=%d,stride=%d,w=%d,h=%d,pad=%d\n",l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
    //int m = l.n/l.groups;
    //int k = l.size*l.size*l.c/l.groups;
    //int n = l.out_w*l.out_h;
    //for(i = 0; i < l.batch; ++i){
    //    for(j = 0; j < l.groups; ++j){
    //        float *a = l.weights + j*l.nweights/l.groups;
    //        float *b = net.workspace;
    //        float *c = l.output + (i*l.groups + j)*n*m;
    //        im2col_cpu(net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w,
    //            l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
    //        gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
    //    }
    //}
    /* GEMM dimensions: m = output channels, k = weights per output channel,
     * n = output spatial size. */
    int m = l.n;
    int k = l.size*l.size*l.c;
    int n = l.out_w*l.out_h;
    float *a = l.weights;
    float *b = net.workspace;
    float *c = l.output;
    /* Unroll input patches into workspace columns, then C = A*B + C. */
    im2col_cpu(net.input,l.c, l.h, l.w, l.size, l.stride, l.pad, b);
    gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
    //CONV_Padding_Relu(net.input,l.output,l.weights,l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
    if(l.batch_normalize){
        forward_batchnorm_layer(l, net);
    } else {
        add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
    }
    activate_array(l.output, l.outputs*l.batch, l.activation);
}
/* Build a convolutional layer descriptor.
 * NOTE(review): weight/bias/scale/output buffers are deliberately NOT
 * allocated here (all callocs commented out) — presumably they are mapped
 * or allocated by the loader elsewhere; confirm before calling forward. */
layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
{
    int i;
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = CONVOLUTIONAL;
    l.groups = groups;
    l.h = h;
    l.w = w;
    l.c = c;
    l.n = n;
    l.binary = binary;
    l.xnor = xnor;
    l.batch = batch;
    l.stride = stride;
    l.size = size;
    l.pad = padding;
    l.batch_normalize = batch_normalize;
    // l.weights = (float *)calloc(c/groups*n*size*size, sizeof(float));
    // l.biases = (float *)calloc(n, sizeof(float));
    /* Sizes recorded so the weight loader knows how much to read. */
    l.nweights = c/groups*n*size*size;
    l.nbiases = n;
    int out_w = convolutional_out_width(l);
    int out_h = convolutional_out_height(l);
    l.out_h = out_h;
    l.out_w = out_w;
    l.out_c = n;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = l.w * l.h * l.c;
    // l.output = (float *)calloc(l.batch*l.outputs, sizeof(float));
    l.forward = forward_convolutional_layer;
    if(batch_normalize){
        // l.scales = (float *)calloc(n, sizeof(float));
        // l.rolling_mean = (float *)calloc(n, sizeof(float));
        //l.rolling_variance = (float *)calloc(n, sizeof(float));
    }
    l.workspace_size = get_workspace_size(l);
    l.activation = activation;
    fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
    return l;
}
/* Nearest-neighbour upsampling by an integer factor `stride`.
 * forward != 0: out[b,c,j,i] = scale * in[b,c,j/stride,i/stride]
 * forward == 0: accumulate gradients back, in += scale * out.
 * `in` is [batch][c][h][w]; `out` is [batch][c][h*stride][w*stride]. */
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int x, y, ch, b;
    const int ow = w*stride;
    const int oh = h*stride;
    for(b = 0; b < batch; ++b){
        for(ch = 0; ch < c; ++ch){
            for(y = 0; y < oh; ++y){
                for(x = 0; x < ow; ++x){
                    int src = b*w*h*c + ch*w*h + (y/stride)*w + x/stride;
                    int dst = b*ow*oh*c + ch*ow*oh + y*ow + x;
                    if(forward) out[dst] = scale*in[src];
                    else        in[src] += scale*out[dst];
                }
            }
        }
    }
}
/* Nearest-neighbour upsample forward pass.
 * NOTE(review): unlike the commented-out reference call, this path ignores
 * l.batch (single image) and l.scale — presumably intentional for this
 * inference build; confirm. */
void forward_upsample_layer(const layer l, network net)
{
    //fill_cpu(l.outputs*l.batch, 0, l.output, 1);
    //upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output);
    const int stride = l.stride;
    const int src_w = l.w;
    const int src_h = l.h;
    const int dst_w = src_w*stride;
    const int dst_h = src_h*stride;
    float *src = net.input;
    float *dst = l.output;
    int ch, y, x;
    for(ch = 0; ch < l.c; ++ch){
        for(y = 0; y < dst_h; ++y){
            /* Each output row replicates one source row. */
            float *src_row = src + ch*src_w*src_h + (y/stride)*src_w;
            float *dst_row = dst + ch*dst_w*dst_h + y*dst_w;
            for(x = 0; x < dst_w; ++x){
                dst_row[x] = src_row[x/stride];
            }
        }
    }
}
/* Build an upsample layer. A negative stride flags a downsample
 * (reverse) layer with magnitude |stride|. Unlike most make_* functions
 * here, the output buffer IS allocated, since no loader provides one. */
layer make_upsample_layer(int batch, int w, int h, int c, int stride)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = UPSAMPLE;
    l.batch = batch;
    l.w = w;
    l.h = h;
    l.c = c;
    l.out_w = w*stride;
    l.out_h = h*stride;
    l.out_c = c;
    if(stride < 0){
        /* Negative stride means downsample by |stride| instead. */
        stride = -stride;
        l.reverse=1;
        l.out_w = w/stride;
        l.out_h = h/stride;
    }
    l.stride = stride;
    l.outputs = l.out_w*l.out_h*l.out_c;
    l.inputs = l.w*l.h*l.c;
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));;
    l.forward = forward_upsample_layer;
    if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}
/* Route layer forward: concatenate the outputs of the layers listed in
 * l.input_layers into l.output, in order.
 * NOTE(review): copies one batch image worth per source layer (no batch
 * loop) — consistent with the batch=1 assumption elsewhere in this file.
 * Change vs. original: removed the unused local `j`. */
void forward_route_layer(const layer l, network net)
{
    int i;
    int offset = 0;
    for(i = 0; i < l.n; ++i){
        int index = l.input_layers[i];         /* source layer index */
        float *input = net.layers[index].output;
        int input_size = l.input_sizes[i];
        copy_cpu(input_size, input, 1, l.output + offset, 1);
        offset += input_size;                  /* append after previous source */
    }
}
/* Build a route (concatenation) layer over `n` source layers.
 * Takes ownership of the caller-allocated input_layers/input_sizes arrays.
 * NOTE(review): l.output is not allocated here (calloc commented out) —
 * it must be provided elsewhere before forward_route_layer runs; confirm. */
layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
{
    fprintf(stderr,"route ");
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = ROUTE;
    l.batch = batch;
    l.n = n;
    l.input_layers = input_layers;
    l.input_sizes = input_sizes;
    int i;
    int outputs = 0;
    /* Total output size is the sum of all source layer outputs. */
    for(i = 0; i < n; ++i){
        fprintf(stderr," %d", input_layers[i]);
        outputs += input_sizes[i];
    }
    fprintf(stderr, "\n");
    l.outputs = outputs;
    l.inputs = outputs;
    // l.output = (float *)calloc(outputs*batch, sizeof(float));;
    l.forward = forward_route_layer;
    return l;
}
/* Compute the flat index of one prediction field inside a YOLO/region
 * layer output. `location` encodes anchor*w*h + cell; `entry` selects the
 * field (0..3 = box, 4 = objectness, 5.. = class scores). Each anchor's
 * block holds 4 box coords + objectness + l.classes scores per cell. */
static int entry_index(layer l, int batch, int location, int entry)
{
    const int cells = l.w*l.h;
    const int anchor = location / cells;
    const int cell = location % cells;
    return batch*l.outputs + anchor*cells*(4+l.classes+1) + entry*cells + cell;
}
/* YOLO detection head forward pass: copy the raw network output, then
 * apply logistic activation to the x,y box offsets (first 2*w*h entries
 * of each anchor block) and to objectness + class scores. Box w,h stay
 * linear.
 * Changes vs. original: removed unused locals (i, j, t) and the dead
 * commented-out debug file dump. */
void forward_yolo_layer(const layer l, network net)
{
    int b, n;
    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            /* x,y offsets for anchor n */
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            /* objectness + class scores for anchor n */
            index = entry_index(l, b, n*l.w*l.h, 4);
            activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC);
        }
    }
}
/* Build a YOLOv3 detection layer. `n` anchors are used by this layer out
 * of `total` anchors in the model; `mask` selects which (defaults to
 * 0..n-1 when absent). Anchor biases default to 0.5 until overwritten by
 * the cfg "anchors" option. */
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes)
{
    int i;
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = YOLO;
    l.n = n;
    l.total = total;
    l.batch = batch;
    l.h = h;
    l.w = w;
    /* Per cell: n anchors x (classes + 4 box coords + 1 objectness). */
    l.c = n*(classes + 4 + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    //l.cost = (float *)calloc(1, sizeof(float));
    l.biases = (float *)calloc(total*2, sizeof(float));
    if(mask) l.mask = mask;
    else{
        /* No mask supplied: use the first n anchors. */
        l.mask = (int *)calloc(n, sizeof(int));
        for(i = 0; i < n; ++i){
            l.mask[i] = i;
        }
    }
    //l.bias_updates = (float *)calloc(n*2, sizeof(float));
    l.outputs = h*w*n*(classes + 4 + 1);
    l.inputs = l.outputs;
    //l.truths = 90*(4 + 1);
    //l.delta = (float *)calloc(batch*l.outputs, sizeof(float));
    l.output = (float *)calloc(batch*l.outputs, sizeof(float));
    /* Placeholder anchor sizes; parse_yolo overwrites from the cfg. */
    for(i = 0; i < total*2; ++i){
        l.biases[i] = .5;
    }
    l.forward = forward_yolo_layer;
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}
/////////////////praser begin
/* One "[name]" section of a darknet cfg file: its bracketed type string
 * and the key=value options that follow it. */
typedef struct{
    char *type;     /* section header, e.g. "[convolutional]" (owned) */
    list *options;  /* list of kvp key=value pairs (owned) */
}section;
/* Forward declaration; defined below. Parses a cfg file into sections. */
list *read_cfg(char *filename);
/* Map a cfg section header (including brackets, e.g. "[conv]") to its
 * LAYER_TYPE. Unknown headers yield BLANK. */
LAYER_TYPE string_to_layer_type(char * type)
{
    static const struct { const char *name; LAYER_TYPE t; } kTable[] = {
        {"[shortcut]", SHORTCUT},
        {"[crop]", CROP},
        {"[cost]", COST},
        {"[detection]", DETECTION},
        {"[region]", REGION},
        {"[yolo]", YOLO},
        {"[local]", LOCAL},
        {"[conv]", CONVOLUTIONAL},
        {"[convolutional]", CONVOLUTIONAL},
        {"[deconv]", DECONVOLUTIONAL},
        {"[deconvolutional]", DECONVOLUTIONAL},
        {"[activation]", ACTIVE},
        {"[logistic]", LOGXENT},
        {"[l2norm]", L2NORM},
        {"[net]", NETWORK},
        {"[network]", NETWORK},
        {"[crnn]", CRNN},
        {"[gru]", GRU},
        {"[lstm]", LSTM},
        {"[rnn]", RNN},
        {"[conn]", CONNECTED},
        {"[connected]", CONNECTED},
        {"[max]", MAXPOOL},
        {"[maxpool]", MAXPOOL},
        {"[reorg]", REORG},
        {"[avg]", AVGPOOL},
        {"[avgpool]", AVGPOOL},
        {"[dropout]", DROPOUT},
        {"[lrn]", NORMALIZATION},
        {"[normalization]", NORMALIZATION},
        {"[batchnorm]", BATCHNORM},
        {"[soft]", SOFTMAX},
        {"[softmax]", SOFTMAX},
        {"[route]", ROUTE},
        {"[upsample]", UPSAMPLE},
    };
    int i;
    for(i = 0; i < (int)(sizeof(kTable)/sizeof(kTable[0])); ++i){
        if (strcmp(type, kTable[i].name) == 0) return kTable[i].t;
    }
    return BLANK;
}
/* Free a cfg section: its header string, every option node and kvp
 * struct, the options list, and the section itself.
 * NOTE(review): pair->val is not freed — presumably key and val point
 * into the same line buffer owned via pair->key; confirm in read_option. */
void free_section(section *s)
{
    free(s->type);
    node *n = s->options->front;
    while(n){
        kvp *pair = (kvp *)n->val;
        free(pair->key);
        free(pair);
        node *next = n->next;   /* grab before freeing the node */
        free(n);
        n = next;
    }
    free(s->options);
    free(s);
}
/* Parse up to n comma-separated floats from `data` into a[0..n-1].
 * The string is consumed destructively: each comma is overwritten with
 * a NUL so sscanf sees one token at a time. Stops early at end of
 * string; a NULL `data` is a no-op. */
void parse_data(char *data, float *a, int n)
{
    if(!data) return;
    char *tok = data;
    char *scan = data;
    int i;
    for(i = 0; i < n; ++i){
        /* Advance to the next comma or the terminator. */
        do { ++scan; } while(*scan != '\0' && *scan != ',');
        int last = (*scan == '\0');
        *scan = '\0';
        sscanf(tok, "%g", &a[i]);
        if(last) break;
        tok = scan + 1;
    }
}
/* Running state threaded through the cfg parser: the shape/size of the
 * previous layer's output (i.e. the next layer's input). */
typedef struct size_params{
    int batch;       /* images per forward pass */
    int inputs;      /* flat input element count */
    int h;           /* input height */
    int w;           /* input width */
    int c;           /* input channels */
    int index;       /* index of the layer being parsed */
    int time_steps;  /* RNN time steps (from [net]) */
    network *net;    /* network under construction */
} size_params;
/* Build a convolutional layer from its cfg section. `pad=1` means "same"
 * padding (size/2) and overrides any explicit `padding` value. */
layer parse_convolutional(list *options, size_params params)
{
    int n = option_find_int(options, "filters",1);
    int size = option_find_int(options, "size",1);
    int stride = option_find_int(options, "stride",1);
    int pad = option_find_int_quiet(options, "pad",0);
    int padding = option_find_int_quiet(options, "padding",0);
    int groups = option_find_int_quiet(options, "groups", 1);
    if(pad) padding = size/2;   /* pad flag wins over explicit padding */
    char *activation_s = option_find_str(options, "activation", "logistic");
    ACTIVATION activation = get_activation(activation_s);
    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch=params.batch;
    if(!(h && w && c)) error("Layer before convolutional layer must output image.");
    int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
    int binary = option_find_int_quiet(options, "binary", 0);
    int xnor = option_find_int_quiet(options, "xnor", 0);
    layer l = make_convolutional_layer(batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, params.net->adam);
    l.flipped = option_find_int_quiet(options, "flipped", 0);
    l.dot = option_find_float_quiet(options, "dot", 0);
    return l;
}
/* Parse a comma-separated anchor-mask string (e.g. "3,4,5") into a
 * freshly calloc'd int array, storing the element count in *num.
 * Returns NULL (and leaves *num untouched) when `a` is NULL.
 * Fix vs. original: after the last element, strchr returns NULL and the
 * old code computed NULL+1 (undefined behavior in C even if unused);
 * the pointer is now only advanced when a comma was actually found. */
int *parse_yolo_mask(char *a, int *num)
{
    int *mask = 0;
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        /* Element count = comma count + 1. */
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        mask = (int *)calloc(n, sizeof(int));
        for(i = 0; i < n; ++i){
            mask[i] = atoi(a);
            char *comma = strchr(a, ',');
            if(!comma) break;       /* last element: nothing to advance past */
            a = comma + 1;
        }
        *num = n;
    }
    return mask;
}
/* Build a YOLO layer from its cfg section: anchor mask, thresholds, and
 * the anchor width/height biases from the "anchors" option. */
layer parse_yolo(list *options, size_params params)
{
    int classes = option_find_int(options, "classes", 20);
    int total = option_find_int(options, "num", 1);
    int num = total;    /* parse_yolo_mask overwrites with the mask length */
    char *a = option_find_str(options, "mask", 0);
    int *mask = parse_yolo_mask(a, &num);
    layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes);
    assert(l.outputs == params.inputs);
    l.max_boxes = option_find_int_quiet(options, "max",90);
    l.jitter = option_find_float(options, "jitter", .2);
    l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
    l.truth_thresh = option_find_float(options, "truth_thresh", 1);
    l.random = option_find_int_quiet(options, "random", 0);
    /* Parse "anchors" as comma-separated floats into l.biases. */
    a = option_find_str(options, "anchors", 0);
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        for(i = 0; i < n; ++i){
            float bias = atof(a);
            l.biases[i] = bias;
            /* NOTE(review): on the last element strchr returns NULL, so this
             * computes NULL+1 — never dereferenced, but technically UB. */
            a = strchr(a, ',')+1;
        }
    }
    return l;
}
/* Build a shortcut (residual add) layer from its cfg section. "from" may
 * be negative, meaning relative to the current layer index. */
layer parse_shortcut(list *options, size_params params, network *net)
{
    char *l = option_find(options, "from");
    int index = atoi(l);
    if(index < 0) index = params.index + index;   /* relative -> absolute */
    int batch = params.batch;
    layer from = net->layers[index];
    layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
    char *activation_s = option_find_str(options, "activation", "linear");
    ACTIVATION activation = get_activation(activation_s);
    s.activation = activation;
    s.alpha = option_find_float_quiet(options, "alpha", 1);
    s.beta = option_find_float_quiet(options, "beta", 1);
    return s;
}
/* Build an upsample layer from its cfg section (default stride 2). */
layer parse_upsample(list *options, size_params params, network *net)
{
    int stride = option_find_int(options, "stride",2);
    layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
    l.scale = option_find_float_quiet(options, "scale", 1);
    return l;
}
/* Build a route layer from its cfg section. "layers" is a comma-separated
 * list of source layer indices (negatives are relative to this layer).
 * Output shape is the channel-concatenation of the sources when their
 * spatial sizes match, else zeroed.
 * Fix vs. original: the NULL check on `l` now runs BEFORE strlen(l) —
 * previously a missing "layers" option crashed before the error message. */
layer parse_route(list *options, size_params params, network *net)
{
    char *l = option_find(options, "layers");
    if(!l) error("Route Layer must specify input layers");
    int len = strlen(l);
    int n = 1;
    int i;
    /* Source count = comma count + 1. */
    for(i = 0; i < len; ++i){
        if (l[i] == ',') ++n;
    }
    int *layers = (int *)calloc(n, sizeof(int));
    int *sizes = (int *)calloc(n, sizeof(int));
    for(i = 0; i < n; ++i){
        int index = atoi(l);
        l = strchr(l, ',')+1;
        if(index < 0) index = params.index + index;   /* relative -> absolute */
        layers[i] = index;
        sizes[i] = net->layers[index].outputs;
    }
    int batch = params.batch;
    layer route_layer = make_route_layer(batch, n, layers, sizes);
    /* Output shape: first source's spatial dims, channels summed — but
     * only while every source matches the first spatially. */
    layer first = net->layers[layers[0]];
    route_layer.out_w = first.out_w;
    route_layer.out_h = first.out_h;
    route_layer.out_c = first.out_c;
    for(i = 1; i < n; ++i){
        int index = layers[i];
        layer next = net->layers[index];
        if(next.out_w == first.out_w && next.out_h == first.out_h){
            route_layer.out_c += next.out_c;
        }else{
            route_layer.out_h = route_layer.out_w = route_layer.out_c = 0;
        }
    }
    return route_layer;
}
void softmax(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for(i = 0; i < n; ++i){
if(input[i*stride] > largest) largest = input[i*stride];
}
for(i = 0; i < n; ++i){
float e = exp(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
/* Apply softmax independently to each of `groups` groups within each of
 * `batch` batches. Group g of batch b starts at offset
 * b*batch_offset + g*group_offset; elements are `stride` apart. */
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int b, g;
    for(b = 0; b < batch; ++b){
        float *in_b = input + b*batch_offset;
        float *out_b = output + b*batch_offset;
        for(g = 0; g < groups; ++g){
            softmax(in_b + g*group_offset, n, temp, stride, out_b + g*group_offset);
        }
    }
}
/* Region (YOLOv2) layer forward pass: copy raw input, logistic-activate
 * box x,y and objectness per anchor, then optionally softmax the class
 * scores.
 * Fix vs. original: the debug dump wrote through fp3 (fputs/fclose) even
 * when fopen failed, dereferencing NULL; the dump is now skipped on
 * failure. NOTE(review): this file dump looks like leftover debug code —
 * consider removing it entirely once debugging is done. */
void forward_region_layer(const layer l, network net)
{
    int b,n;
    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            /* x,y offsets for anchor n */
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            /* objectness (unless this anchor models background) */
            index = entry_index(l, b, n*l.w*l.h, l.coords);
            if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
            //if(!l.softmax) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC);
        }
    }
    if (l.softmax){
        int index = entry_index(l, 0, 0, l.coords + !l.background);
        softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
    }
    /* Debug dump of the raw layer input to a text file. */
    char line[256];
    FILE *fp3;
    char filename[256];
    sprintf(filename, "yolo_layer_%d.txt", 123123);
    printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
    if( (fp3 = fopen(filename, "w")) == NULL){
        fprintf(stderr,"CANNOT OPEN\n");
    } else {
        int x;
        for( x = 0; x < l.outputs; x++)
        {
            sprintf(line, "%f\n", net.input[x]);
            if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
        }
        fclose(fp3);
    }
#endif
    if(!net.train) return;
}
/* Build a region (YOLOv2) detection layer: n anchors per cell, each with
 * `coords` box values + 1 objectness + `classes` scores. Anchor biases
 * default to 0.5 until parse_region overwrites them from the cfg. */
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = REGION;
    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    /* Per cell: n anchors x (classes + coords + objectness). */
    l.c = n*(classes + coords + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    l.coords = coords;
    l.biases = (float *)calloc(n*2, sizeof(float));
    l.outputs = h*w*n*(classes + coords + 1);
    l.inputs = l.outputs;
    l.truths = 30*(l.coords + 1);
    l.output = (float *)calloc(batch*l.outputs, sizeof(float));
    int i;
    /* Placeholder anchor sizes; parse_region overwrites from the cfg. */
    for(i = 0; i < n*2; ++i){
        l.biases[i] = .5;
    }
    l.forward = forward_region_layer;
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}
/* Build a region layer from its cfg section: thresholds, loss scales,
 * and the anchor biases from the "anchors" option. */
layer parse_region(list *options, size_params params)
{
    int coords = option_find_int(options, "coords", 4);
    int classes = option_find_int(options, "classes", 20);
    int num = option_find_int(options, "num", 1);
    layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
    assert(l.outputs == params.inputs);
    l.log = option_find_int_quiet(options, "log", 0);
    l.sqrt = option_find_int_quiet(options, "sqrt", 0);
    l.softmax = option_find_int(options, "softmax", 0);
    l.background = option_find_int_quiet(options, "background", 0);
    l.max_boxes = option_find_int_quiet(options, "max",30);
    l.jitter = option_find_float(options, "jitter", .2);
    l.rescore = option_find_int_quiet(options, "rescore",0);
    l.thresh = option_find_float(options, "thresh", .5);
    l.classfix = option_find_int_quiet(options, "classfix", 0);
    l.absolute = option_find_int_quiet(options, "absolute", 0);
    l.random = option_find_int_quiet(options, "random", 0);
    l.coord_scale = option_find_float(options, "coord_scale", 1);
    l.object_scale = option_find_float(options, "object_scale", 1);
    l.noobject_scale = option_find_float(options, "noobject_scale", 1);
    l.mask_scale = option_find_float(options, "mask_scale", 1);
    l.class_scale = option_find_float(options, "class_scale", 1);
    l.bias_match = option_find_int_quiet(options, "bias_match",0);
    /* Tree/map files are read but unused in this build (loads disabled). */
    char *tree_file = option_find_str(options, "tree", 0);
    // if (tree_file) l.softmax_tree = read_tree(tree_file);
    char *map_file = option_find_str(options, "map", 0);
    // if (map_file) l.map = read_map(map_file);
    /* Parse "anchors" as comma-separated floats into l.biases. */
    char *a = option_find_str(options, "anchors", 0);
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        for(i = 0; i < n; ++i){
            float bias = atof(a);
            l.biases[i] = bias;
            /* NOTE(review): NULL+1 on the last element — unused, but UB. */
            a = strchr(a, ',')+1;
        }
    }
    return l;
}
/* Reorg (space/depth shuffle): maps [b][c][h][w] onto a tensor with
 * c/(stride^2) channels and (h*stride, w*stride) spatial dims. With
 * forward != 0, out[dst] = x[src]; otherwise out[src] = x[dst]. */
void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int b, ch, row, col;
    const int out_c = c/(stride*stride);
    for(b = 0; b < batch; ++b){
        for(ch = 0; ch < c; ++ch){
            const int c2 = ch % out_c;          /* destination channel */
            const int offset = ch / out_c;      /* which sub-pixel slot */
            for(row = 0; row < h; ++row){
                const int h2 = row*stride + offset / stride;
                for(col = 0; col < w; ++col){
                    const int w2 = col*stride + offset % stride;
                    const int in_index  = col + w*(row + h*(ch + c*b));
                    const int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
                    if(forward) out[out_index] = x[in_index];
                    else        out[in_index] = x[out_index];
                }
            }
        }
    }
}
/* Reorg layer forward pass. Only the plain (non-flatten, non-extra,
 * non-reverse) path is active in this build; the alternatives are kept
 * commented out for reference. */
void forward_reorg_layer(const layer l, network net)
{
    int i;
    //if(l.flatten){
    //    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
    //    if(l.reverse){
    //        flatten(l.output, l.w*l.h, l.c, l.batch, 0);
    //    }else{
    //        flatten(l.output, l.w*l.h, l.c, l.batch, 1);
    //    }
    //} else if (l.extra) {
    //    for(i = 0; i < l.batch; ++i){
    //        copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1);
    //    }
    //} else if (l.reverse){
    //    reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output);
    //} else {
        /* Standard reorg (forward=0 path of reorg_cpu). */
        reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
    //}
}
/* Build a reorg layer. reverse expands spatially (w,h * stride, channels
 * / stride^2); forward shrinks spatially and multiplies channels. extra
 * overrides the shape with a flat inputs+extra output.
 * NOTE(review): l.output is not allocated here (calloc commented out) —
 * must be provided elsewhere before the forward pass; confirm. */
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = REORG;
    l.batch = batch;
    l.stride = stride;
    l.extra = extra;
    l.h = h;
    l.w = w;
    l.c = c;
    l.flatten = flatten;
    if(reverse){
        l.out_w = w*stride;
        l.out_h = h*stride;
        l.out_c = c/(stride*stride);
    }else{
        l.out_w = w/stride;
        l.out_h = h/stride;
        l.out_c = c*(stride*stride);
    }
    l.reverse = reverse;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    if(l.extra){
        /* extra mode: flat output, spatial shape undefined. */
        l.out_w = l.out_h = l.out_c = 0;
        l.outputs = l.inputs + l.extra;
    }
    if(extra){
        fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs);
    } else {
        fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    }
    int output_size = l.outputs * batch;
    //l.output = (float *)calloc(output_size, sizeof(float));
    l.forward = forward_reorg_layer;
    return l;
}
/* Build a reorg layer from its cfg section. */
layer parse_reorg(list *options, size_params params)
{
    int stride  = option_find_int(options, "stride",1);
    int reverse = option_find_int_quiet(options, "reverse",0);
    int flatten = option_find_int_quiet(options, "flatten",0);
    int extra   = option_find_int_quiet(options, "extra",0);
    int h = params.h;
    int w = params.w;
    int c = params.c;
    int batch = params.batch;
    if(!(h && w && c)) error("Layer before reorg layer must output image.");
    /* local renamed from `layer` to avoid shadowing the type name */
    layer l = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra);
    return l;
}
/* Max-pooling forward pass: for each output cell, take the maximum over
 * a size x size window of the input (stride l.stride, padding l.pad,
 * out-of-bounds taps treated as -FLT_MAX). Also records the winning
 * input index in l.indexes (used by backprop).
 * NOTE(review): make_maxpool_layer leaves l.indexes unallocated in this
 * build — confirm it is provided before this runs. */
void forward_maxpool_layer(layer l, network net)
{
    int b,i,j,k,m,n;
    /* Window origin offset introduced by padding. */
    int w_offset = -l.pad;
    int h_offset = -l.pad;
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;
    for(b = 0; b < l.batch; ++b){
        for(k = 0; k < c; ++k){
            for(i = 0; i < h; ++i){
                for(j = 0; j < w; ++j){
                    int out_index = j + w*(i + h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    /* Scan the pooling window. */
                    for(n = 0; n < l.size; ++n){
                        for(m = 0; m < l.size; ++m){
                            int cur_h = h_offset + i*l.stride + n;
                            int cur_w = w_offset + j*l.stride + m;
                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                            int valid = (cur_h >= 0 && cur_h < l.h &&
                                         cur_w >= 0 && cur_w < l.w);
                            /* Padding taps never win: they read as -FLT_MAX. */
                            float val = (valid != 0) ? net.input[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max   = (val > max) ? val   : max;
                        }
                    }
                    l.output[out_index] = max;
                    l.indexes[out_index] = max_i;
                }
            }
        }
    }
}
/* Build a max-pooling layer.
 * NOTE(review): l.indexes and l.output are not allocated here (callocs
 * commented out) — they must be provided elsewhere before the forward
 * pass; confirm against the deployment path. */
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = MAXPOOL;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.pad = padding;
    l.out_w = (w + 2*padding)/stride;
    l.out_h = (h + 2*padding)/stride;
    l.out_c = c;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    l.size = size;
    l.stride = stride;
    int output_size = l.out_h * l.out_w * l.out_c * batch;
    //l.indexes = (int *)calloc(output_size, sizeof(int));
    //l.output = (float *)calloc(output_size, sizeof(float));
    l.forward = forward_maxpool_layer;
    fprintf(stderr, "max          %d x %d / %d  %4d x%4d x%4d   ->  %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}
/* Build a maxpool layer from its cfg section. size defaults to stride,
 * padding defaults to (size-1)/2 ("same"-style). */
layer parse_maxpool(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int size = option_find_int(options, "size",stride);
    int padding = option_find_int_quiet(options, "padding", (size-1)/2);
    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch=params.batch;
    if(!(h && w && c)) error("Layer before maxpool layer must output image.");
    layer maxpool_layer = make_maxpool_layer(batch,h,w,c,size,stride,padding);
    return maxpool_layer;
}
/* Map a learning-rate policy name from the cfg to its enum value.
 * Unknown names fall back to CONSTANT with a warning. */
learning_rate_policy get_policy(char *s)
{
    static const struct { const char *name; learning_rate_policy p; } kPolicies[] = {
        {"random", RANDOM},
        {"poly", POLY},
        {"constant", CONSTANT},
        {"step", STEP},
        {"exp", EXP},
        {"sigmoid", SIG},
        {"steps", STEPS},
    };
    int i;
    for(i = 0; i < (int)(sizeof(kPolicies)/sizeof(kPolicies[0])); ++i){
        if (strcmp(s, kPolicies[i].name) == 0) return kPolicies[i].p;
    }
    fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
    return CONSTANT;
}
/* Populate network-wide hyperparameters from the [net] cfg section:
 * batch/subdivision sizing, input shape, augmentation ranges, and the
 * learning-rate schedule (policy-specific fields). */
void parse_net_options(list *options, network *net)
{
    net->batch = option_find_int(options, "batch",1);
    net->learning_rate = option_find_float(options, "learning_rate", .001);
    net->momentum = option_find_float(options, "momentum", .9);
    net->decay = option_find_float(options, "decay", .0001);
    int subdivs = option_find_int(options, "subdivisions",1);
    net->time_steps = option_find_int_quiet(options, "time_steps",1);
    net->notruth = option_find_int_quiet(options, "notruth",0);
    /* Effective per-step batch: cfg batch split across subdivisions,
     * then scaled by RNN time steps. */
    net->batch /= subdivs;
    net->batch *= net->time_steps;
    net->subdivisions = subdivs;
    net->random = option_find_int_quiet(options, "random", 0);
    net->adam = option_find_int_quiet(options, "adam", 0);
    if(net->adam){
        net->B1 = option_find_float(options, "B1", .9);
        net->B2 = option_find_float(options, "B2", .999);
        net->eps = option_find_float(options, "eps", .0000001);
    }
    net->h = option_find_int_quiet(options, "height",0);
    net->w = option_find_int_quiet(options, "width",0);
    net->c = option_find_int_quiet(options, "channels",0);
    net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
    net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
    net->min_crop = option_find_int_quiet(options, "min_crop",net->w);
    net->max_ratio = option_find_float_quiet(options, "max_ratio", (float) net->max_crop / net->w);
    net->min_ratio = option_find_float_quiet(options, "min_ratio", (float) net->min_crop / net->w);
    net->center = option_find_int_quiet(options, "center",0);
    net->clip = option_find_float_quiet(options, "clip", 0);
    /* Data-augmentation ranges. */
    net->angle = option_find_float_quiet(options, "angle", 0);
    net->aspect = option_find_float_quiet(options, "aspect", 1);
    net->saturation = option_find_float_quiet(options, "saturation", 1);
    net->exposure = option_find_float_quiet(options, "exposure", 1);
    net->hue = option_find_float_quiet(options, "hue", 0);
    if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
    char *policy_s = option_find_str(options, "policy", "constant");
    net->policy = get_policy(policy_s);
    net->burn_in = option_find_int_quiet(options, "burn_in", 0);
    net->power = option_find_float_quiet(options, "power", 4);
    if(net->policy == STEP){
        net->step = option_find_int(options, "step", 1);
        net->scale = option_find_float(options, "scale", 1);
    } else if (net->policy == STEPS){
        /* STEPS: parse parallel comma-separated "steps" and "scales" lists. */
        char *l = option_find(options, "steps");
        char *p = option_find(options, "scales");
        if(!l || !p) error("STEPS policy must have steps and scales in cfg file");
        int len = strlen(l);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (l[i] == ',') ++n;
        }
        int *steps = (int *)calloc(n, sizeof(int));
        float *scales = (float *)calloc(n, sizeof(float));
        for(i = 0; i < n; ++i){
            int step    = atoi(l);
            float scale = atof(p);
            /* NOTE(review): NULL+1 on the final element — unused, but UB. */
            l = strchr(l, ',')+1;
            p = strchr(p, ',')+1;
            steps[i] = step;
            scales[i] = scale;
        }
        net->scales = scales;
        net->steps = steps;
        net->num_steps = n;
    } else if (net->policy == EXP){
        net->gamma = option_find_float(options, "gamma", 1);
    } else if (net->policy == SIG){
        net->gamma = option_find_float(options, "gamma", 1);
        net->step = option_find_int(options, "step", 1);
    } else if (net->policy == POLY || net->policy == RANDOM){
    }
    net->max_batches = option_find_int(options, "max_batches", 0);
}
/* True when the section header names the top-level network block. */
int is_network(section *s)
{
    const char *t = s->type;
    return !strcmp(t, "[net]") || !strcmp(t, "[network]");
}
/* Parse a darknet cfg file into a network: read the [net] header, then
 * build each layer section in order, threading the output shape of each
 * layer into the next via `params`.
 * Fix vs. original: removed the duplicate, unreachable
 * `else if(lt == YOLO)` branch (YOLO was already handled earlier in the
 * same chain). */
network *parse_network_cfg(char *filename)
{
    list *sections = read_cfg(filename);
    node *n = sections->front;
    if(!n) error("Config file has no sections");
    /* First section is [net]; the rest are layers. */
    network *net = make_network(sections->size - 1);
    net->gpu_index = -1;
    size_params params;
    section *s = (section *)n->val;
    list *options = s->options;
    if(!is_network(s)) error("First section must be [net] or [network]");
    parse_net_options(options, net);
    params.h = net->h;
    params.w = net->w;
    params.c = net->c;
    params.inputs = net->inputs;
    params.batch = net->batch;
    params.time_steps = net->time_steps;
    params.net = net;
    size_t workspace_size = 0;
    n = n->next;
    int count = 0;
    free_section(s);
    fprintf(stderr, "layer     filters    size              input                output\n");
    while(n){
        params.index = count;
        fprintf(stderr, "%5d ", count);
        s = (section *)n->val;
        options = s->options;
        //layer l = {0};
        layer l;
        memset(&l,0,sizeof(layer));
        LAYER_TYPE lt = string_to_layer_type(s->type);
        if(lt == CONVOLUTIONAL){
            l = parse_convolutional(options, params);
        }else if(lt == YOLO){
            l = parse_yolo(options, params);
        }else if(lt == ROUTE){
            l = parse_route(options, params, net);
        }else if(lt == UPSAMPLE){
            l = parse_upsample(options, params, net);
        }else if(lt == SHORTCUT){
            l = parse_shortcut(options, params, net);
        }else if(lt == REGION){
            l = parse_region(options, params);
        }else if(lt == MAXPOOL){
            l = parse_maxpool(options, params);
        }else if(lt == REORG){
            l = parse_reorg(options, params);
        }else{
            /* Unsupported layer: stored zero-initialized. */
            fprintf(stderr, "Type not recognized: %s\n", s->type);
        }
        /* Generic per-layer options. */
        l.clip = net->clip;
        l.truth = option_find_int_quiet(options, "truth", 0);
        l.onlyforward = option_find_int_quiet(options, "onlyforward", 0);
        l.stopbackward = option_find_int_quiet(options, "stopbackward", 0);
        l.dontsave = option_find_int_quiet(options, "dontsave", 0);
        // l.dontload = option_find_int_quiet(options, "dontload", 0);
        // l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
        //l.learning_rate_scale = option_find_float_quiet(options, "learning_rate", 1);
        l.smooth = option_find_float_quiet(options, "smooth", 0);
        option_unused(options);
        net->layers[count] = l;
        if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        free_section(s);
        n = n->next;
        ++count;
        if(n){
            /* Next layer's input shape is this layer's output shape. */
            params.h = l.out_h;
            params.w = l.out_w;
            params.c = l.out_c;
            params.inputs = l.outputs;
        }
    }
    free_list(sections);
    layer out = get_network_output_layer(net);
    net->outputs = out.outputs;
    net->output = out.output;
    //net->input = (float *)calloc(net->inputs*net->batch, sizeof(float));
    /* Workspace allocation intentionally disabled in this build —
     * net->workspace must be provided externally. */
    workspace_size = 0;//donot calloc workspace
    //if(workspace_size){
    //    //printf("%ld\n", workspace_size);
    //    net->workspace = (float *)calloc(1, workspace_size);
    //}
    return net;
}
/* Read a darknet cfg file into a list of `section`s. Each "[...]" line
 * starts a new section; "key=value" lines are added to the current one;
 * blank lines and '#'/';' comments are skipped.
 * Fix vs. original: an option line appearing before any "[section]"
 * header previously dereferenced a NULL `current`; it is now reported
 * as a config error instead of crashing. */
list *read_cfg(char *filename)
{
    FILE *file = fopen(filename, "r");
    if(file == 0) file_error(filename);
    char *line;
    int nu = 0;             /* line number, for error messages */
    list *options = make_list();
    section *current = 0;   /* section being filled */
    while((line=fgetl(file)) != 0){
        ++ nu;
        strip(line);
        switch(line[0]){
            case '[':
                /* New section: the line itself becomes the type string. */
                current = (section *)malloc(sizeof(section));
                list_insert(options, current);
                current->options = make_list();
                current->type = line;
                break;
            case '\0':
            case '#':
            case ';':
                free(line);
                break;
            default:
                if(!current){
                    fprintf(stderr, "Config file error line %d, option before any section: %s\n", nu, line);
                    free(line);
                } else if(!read_option(line, current->options)){
                    fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
                    free(line);
                }
                break;
        }
    }
    fclose(file);
    return options;
}
/* Read one convolutional layer's parameters from an open weights file,
 * in darknet order: biases, then (if batch-normalized) scales, rolling
 * mean and rolling variance, then the weights themselves.
 * Fix vs. original: fread return values are now checked so a truncated
 * weights file is reported instead of silently leaving stale data. */
void load_convolutional_weights(layer l, FILE *fp)
{
    int num = l.nweights;
    if(fread(l.biases, sizeof(float), l.n, fp) != (size_t)l.n)
        fprintf(stderr, "load_convolutional_weights: short read (biases)\n");
    if (l.batch_normalize){
        if(fread(l.scales, sizeof(float), l.n, fp) != (size_t)l.n)
            fprintf(stderr, "load_convolutional_weights: short read (scales)\n");
        if(fread(l.rolling_mean, sizeof(float), l.n, fp) != (size_t)l.n)
            fprintf(stderr, "load_convolutional_weights: short read (rolling_mean)\n");
        if(fread(l.rolling_variance, sizeof(float), l.n, fp) != (size_t)l.n)
            fprintf(stderr, "load_convolutional_weights: short read (rolling_variance)\n");
    }
    if(fread(l.weights, sizeof(float), num, fp) != (size_t)num)
        fprintf(stderr, "load_convolutional_weights: short read (weights)\n");
}
/* Load weights for layers [start, min(cutoff, net->n)) from a darknet
 * .weights file. The file starts with a major/minor/revision header,
 * followed by the training counter ("seen"), then per-layer weights. */
void load_weights_upto(network *net, char *filename, int start, int cutoff)
{
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stdout);
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    int major;
    int minor;
    int revision;
    fread(&major, sizeof(int), 1, fp);
    fread(&minor, sizeof(int), 1, fp);
    fread(&revision, sizeof(int), 1, fp);
    printf("major=%d;minor=%d;revision=%d\n",major,minor,revision);// 0 2 0
    printf("if true ro false:%d\n",(major*10 + minor) >= 2 && major < 1000 && minor < 1000);
    if ((major*10 + minor) >= 2 && major < 1000 && minor < 1000){
        //fread(net->seen, sizeof(size_t), 1, fp);
        /* NOTE(review): "seen" is stored as 8 bytes in version >= 0.2
         * files; reading size_t twice presumably skips all 8 bytes on a
         * 32-bit target where sizeof(size_t)==4 (see comment below) —
         * on 64-bit this reads 16 bytes and would misalign; confirm. */
        fread(net->seen, sizeof(size_t), 1, fp);
        fread(net->seen, sizeof(size_t), 1, fp);
    }else {
        /* Older files store "seen" as a 4-byte int. */
        int iseen = 0;
        fread(&iseen, sizeof(int), 1, fp);
        *net->seen = iseen;
    }
    //printf("sizeof(size_t)=%u\n",sizeof(size_t));// in my PC is 4
    int i;
    /* Only convolutional layers carry weights in this build. */
    for(i = start; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            load_convolutional_weights(l, fp);
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}
/* Load weights for every layer of the network (no start/cutoff limit). */
void load_weights(network *net, char *filename)
{
    load_weights_upto(net, filename, 0, net->n);
}
/////////////////praser end
/////////////////network begin
/* Collect the network's data-loading/augmentation settings into a
 * load_args struct for the data loader. */
load_args get_base_args(network *net)
{
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.size = net->w;
    args.min = net->min_crop;
    args.max = net->max_crop;
    args.angle = net->angle;
    args.aspect = net->aspect;
    args.exposure = net->exposure;
    args.center = net->center;
    args.saturation = net->saturation;
    args.hue = net->hue;
    return args;
}
/* Parse a cfg file into a network and optionally reset its "seen"
 * counter.
 * NOTE(review): the weights-loading call is commented out, so the
 * `weights` argument is currently ignored — presumably weights are
 * loaded by a separate mechanism in this build; confirm. */
network *load_network(char *cfg, char *weights, int clear)
{
    network *net = parse_network_cfg(cfg);
    //if(weights && weights[0] != 0){
    //    load_weights(net, weights);
    //}
    if(clear) (*net->seen) = 0;
    return net;
}
// Map a LAYER_TYPE enum value to its human-readable cfg-file name.
// Returns the static string "none" for any unrecognized value.
char *get_layer_string(LAYER_TYPE a)
{
    switch(a){
        case CONVOLUTIONAL:
            return "convolutional";
        case ACTIVE:
            return "activation";
        case LOCAL:
            return "local";
        case DECONVOLUTIONAL:
            return "deconvolutional";
        case CONNECTED:
            return "connected";
        case RNN:
            return "rnn";
        case GRU:
            return "gru";
        case LSTM:
            return "lstm";
        case CRNN:
            return "crnn";
        case MAXPOOL:
            return "maxpool";
        case REORG:
            return "reorg";
        case AVGPOOL:
            return "avgpool";
        case SOFTMAX:
            return "softmax";
        case DETECTION:
            return "detection";
        case REGION:
            return "region";
        case YOLO:
            return "yolo";
        case DROPOUT:
            return "dropout";
        case CROP:
            return "crop";
        case COST:
            return "cost";
        case ROUTE:
            return "route";
        case SHORTCUT:
            return "shortcut";
        case NORMALIZATION:
            return "normalization";
        case BATCHNORM:
            return "batchnorm";
        default:
            break;
    }
    return "none";
}
// Allocate a zero-initialized network with room for n layers plus its
// bookkeeping scalars (seen-samples counter, step counter, cost).
network *make_network(int n)
{
    network *net = (network *)calloc(1, sizeof(network));
    net->seen = (size_t *)calloc(1, sizeof(size_t));
    net->t = (int *)calloc(1, sizeof(int));
    net->cost = (float *)calloc(1, sizeof(float));
    net->n = n;
    net->layers = (layer *)calloc((size_t)n, sizeof(layer));
    return net;
}
// Run a forward pass over all layers in order. Note this works on a local
// *copy* of the network struct: net.input is rewired to each layer's output
// in turn, so the caller's netp->input pointer is left untouched.
void forward_network(network *netp)
{
    network net = *netp;
    int i;
    for(i = 0; i < net.n; ++i){
        net.index = i;
        layer l = net.layers[i];
        l.forward(l, net);      // layer-specific forward function pointer
        net.input = l.output;   // next layer consumes this layer's output
        // printf("layer [%d]\n",i);
    }
}
// Apply the same softmax temperature t to every layer of the network.
void set_temp_network(network *net, float t)
{
    int idx;
    for(idx = 0; idx < net->n; ++idx){
        net->layers[idx].temperature = t;
    }
}
// Set the batch size on the network and propagate it to every layer.
void set_batch_network(network *net, int b)
{
    net->batch = b;
    int idx;
    for(idx = 0; idx < net->n; ++idx){
        net->layers[idx].batch = b;
    }
}
// Run inference on `input` and return a pointer to the final layer's output.
// The caller-visible network state is saved and restored around the pass, so
// prediction leaves *net unchanged (the returned pointer aliases layer storage).
float *network_predict(network *net, float *input)
{
    network orig = *net;   // snapshot to undo the field tweaks below
    net->input = input;
    net->truth = 0;        // inference only: no ground truth,
    net->train = 0;        // no training mode,
    net->delta = 0;        // no gradient buffer
    forward_network(net);
    float *out = net->output;
    *net = orig;           // restore caller's view
    return out;
}
// Count YOLO predictions whose objectness exceeds thresh, over every spatial
// cell and every anchor of the layer.
int yolo_num_detections(layer l, float thresh)
{
    int count = 0;
    int cell, anchor;
    for (cell = 0; cell < l.w*l.h; ++cell){
        for (anchor = 0; anchor < l.n; ++anchor){
            int obj_index = entry_index(l, 0, anchor*l.w*l.h + cell, 4);
            if (l.output[obj_index] > thresh) count += 1;
        }
    }
    return count;
}
// Total number of detection slots needed across all output layers of the net:
// thresholded count for YOLO layers, full grid capacity for DETECTION/REGION.
int num_detections(network *net, float thresh)
{
    int total = 0;
    int idx;
    for(idx = 0; idx < net->n; ++idx){
        layer l = net->layers[idx];
        if(l.type == YOLO){
            total += yolo_num_detections(l, thresh);
        } else if(l.type == DETECTION || l.type == REGION){
            total += l.w*l.h*l.n;
        }
    }
    return total;
}
// Allocate the detection array sized by num_detections(); each entry gets a
// per-class probability vector. Writes the count through `num` when non-NULL.
detection *make_network_boxes(network *net, float thresh, int *num)
{
    layer last = net->layers[net->n - 1];
    int nboxes = num_detections(net, thresh);
    //printf("num_detections nboxes = %d\n",nboxes);
    if(num) *num = nboxes;
    detection *dets = (detection *)calloc(nboxes, sizeof(detection));
    int k;
    for(k = 0; k < nboxes; ++k){
        dets[k].prob = (float *)calloc(last.classes, sizeof(float));
    }
    return dets;
}
// Decode one raw YOLO prediction (tx,ty,tw,th at `index`, stride apart) into
// a box normalized to [0,1]: center offset by cell (i,j), size via anchor n.
box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
{
    float tx = x[index + 0*stride];
    float ty = x[index + 1*stride];
    float tw = x[index + 2*stride];
    float th = x[index + 3*stride];
    box b;
    b.x = (i + tx) / lw;
    b.y = (j + ty) / lh;
    b.w = exp(tw) * biases[2*n] / w;
    b.h = exp(th) * biases[2*n+1] / h;
    return b;
}
// Undo letterboxing: map boxes from network-input coordinates (netw x neth,
// with the image scaled to new_w x new_h and padded) back to the original
// image. When !relative, also scale to absolute pixel coordinates.
void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w=0;
    int new_h=0;
    // Pick the dimension that limited the aspect-preserving resize.
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    for (i = 0; i < n; ++i){
        box b = dets[i].bbox;
        // Remove the centering pad, then rescale to the un-padded extent.
        b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}
// Extract boxes above the objectness threshold from a YOLO layer into `dets`.
// Returns the number written. `map` is accepted for interface parity but not
// used here. Class probabilities are gated by objectness*class score > thresh.
int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets)
{
    int i,j,n;
    float *predictions = l.output;
    // if (l.batch == 2) avg_flipped_yolo(l);
    int count = 0;
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            // Entry 4 is the objectness score for this anchor/cell.
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float objectness = predictions[obj_index];
            if(objectness <= thresh) continue;
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h);
            dets[count].objectness = objectness;
            dets[count].classes = l.classes;
            for(j = 0; j < l.classes; ++j){
                // Class entries start after box (4) + objectness (1).
                int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j);
                float prob = objectness*predictions[class_index];
                dets[count].prob[j] = (prob > thresh) ? prob : 0;
            }
            ++count;
        }
    }
    correct_yolo_boxes(dets, count, w, h, netw, neth, relative);
    return count;
}
// Decode one raw region-layer prediction into a normalized box using the
// anchor priors in `biases` (same decoding as YOLO, but w/h are grid dims).
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
    float tx = x[index + 0*stride];
    float ty = x[index + 1*stride];
    float tw = x[index + 2*stride];
    float th = x[index + 3*stride];
    box b;
    b.x = (i + tx) / w;
    b.y = (j + ty) / h;
    b.w = exp(tw) * biases[2*n] / w;
    b.h = exp(th) * biases[2*n+1] / h;
    return b;
}
// Undo letterboxing for region-layer boxes (identical math to
// correct_yolo_boxes above — kept separate to mirror upstream darknet).
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w=0;
    int new_h=0;
    // Aspect-preserving resize: one dimension fills the net input exactly.
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    for (i = 0; i < n; ++i){
        box b = dets[i].bbox;
        // Strip the centering pad and rescale back to image space.
        b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}
// Fill `dets` (capacity l.w*l.h*l.n) from a region layer. When l.batch == 2
// the second batch item is a horizontally flipped copy; it is un-flipped and
// averaged into the first. `map`/`tree_thresh` are unused in this build.
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    int i,j,n,z;
    float *predictions = l.output;
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w/2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for(z = 0; z < l.classes + l.coords + 1; ++z){
                        // Mirror each row of the flipped copy back.
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if(z == 0){
                            // Channel 0 is the x-offset: negate when mirroring.
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for(i = 0; i < l.outputs; ++i){
            l.output[i] = (l.output[i] + flip[i])/2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = n*l.w*l.h + i;
            for(j = 0; j < l.classes; ++j){
                dets[index].prob[j] = 0;
            }
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);
            dets[index].objectness = scale > thresh ? scale : 0;
            // NOTE(review): dets[].mask is only read if a caller allocated it;
            // make_network_boxes in this file never does, so this branch looks
            // inert here — confirm against other callers.
            if(dets[index].mask){
                for(j = 0; j < l.coords - 4; ++j){
                    dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
                }
            }
            // NOTE(review): this outer class_index is shadowed by the inner
            // declaration below and never read — it appears to be dead code.
            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if(dets[index].objectness){
                for(j = 0; j < l.classes; ++j){
                    int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
                    float prob = scale*predictions[class_index];
                    dets[index].prob[j] = (prob > thresh) ? prob : 0;
                }
            }
        }
    }
    correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
// Walk every output layer and append its detections into `dets`, advancing
// the write cursor by however many entries each layer produced.
void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
{
    detection *cursor = dets;
    int idx;
    for(idx = 0; idx < net->n; ++idx){
        layer l = net->layers[idx];
        if(l.type == YOLO){
            int produced = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, cursor);
            cursor += produced;
        } else if(l.type == REGION){
            get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, cursor);
            cursor += l.w*l.h*l.n;
        }
    }
}
// Allocate and populate the full detection array for one forward pass.
// Caller owns the result and must release it with free_detections().
detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
{
    detection *dets = make_network_boxes(net, thresh, num);
    fill_network_boxes(net, w, h, thresh, hier, map, relative, dets);
    return dets;
}
// Release the per-detection probability (and optional mask) arrays, then the
// detection array itself.
void free_detections(detection *dets, int n)
{
    int k;
    for(k = 0; k < n; ++k){
        free(dets[k].prob);
        if(dets[k].mask) free(dets[k].mask);
    }
    free(dets);
}
int network_width(network *net){return net->w;}   // Network input width in pixels.
int network_height(network *net){return net->h;}  // Network input height in pixels.
// Return the last layer that is not a COST layer (the real output layer).
layer get_network_output_layer(network *net)
{
    int idx = net->n - 1;
    while(idx >= 0 && net->layers[idx].type == COST){
        --idx;
    }
    return net->layers[idx];
}
// Release everything owned by a network built with make_network()/the parser:
// each layer's buffers, the layer array, optional input/truth copies, the
// bookkeeping scalars, and finally the struct itself.
void free_network(network *net)
{
    int i;
    for(i = 0; i < net->n; ++i){
        free_layer(net->layers[i]);
    }
    free(net->layers);
    if(net->input) free(net->input);
    if(net->truth) free(net->truth);
    // Fix: make_network() in this file also callocs these three; not freeing
    // them leaked memory on every network teardown. free(NULL) is a no-op,
    // so this stays safe for networks built elsewhere.
    free(net->seen);
    free(net->t);
    free(net->cost);
    free(net);
}
// Return the network's output layer (last non-COST layer).
// Fix: this body was a byte-for-byte duplicate of get_network_output_layer()
// defined earlier in this file; delegate so the two stay consistent.
layer network_output_layer(network *net)
{
    return get_network_output_layer(net);
}
// Number of input activations expected by the first layer.
int network_inputs(network *net)
{
    return net->layers[0].inputs;
}
// Number of output activations produced by the output layer.
int network_outputs(network *net)
{
    return network_output_layer(net).outputs;
}
// Pointer to the output layer's activation buffer (owned by the layer).
float *network_output(network *net)
{
    return network_output_layer(net).output;
}
//////////////////network end
//////////////////////box begin
int nms_comparator(const void *pa, const void *pb)
{
detection a = *(detection *)pa;
detection b = *(detection *)pb;
float diff = 0;
if(b.sort_class >= 0){
diff = a.prob[b.sort_class] - b.prob[b.sort_class];
} else {
diff = a.objectness - b.objectness;
}
if(diff < 0) return 1;
else if(diff > 0) return -1;
return 0;
}
// 1-D overlap of two segments given by center and width.
// Negative when the segments are disjoint.
float overlap(float x1, float w1, float x2, float w2)
{
    float lo1 = x1 - w1/2;
    float hi1 = x1 + w1/2;
    float lo2 = x2 - w2/2;
    float hi2 = x2 + w2/2;
    float left  = (lo1 > lo2) ? lo1 : lo2;
    float right = (hi1 < hi2) ? hi1 : hi2;
    return right - left;
}
// Intersection area of two boxes; zero when they do not overlap on an axis.
float box_intersection(box a, box b)
{
    float ow = overlap(a.x, a.w, b.x, b.w);
    float oh = overlap(a.y, a.h, b.y, b.h);
    if(ow < 0 || oh < 0) return 0;
    return ow * oh;
}
// Union area by inclusion-exclusion: areaA + areaB - intersection.
float box_union(box a, box b)
{
    float inter = box_intersection(a, b);
    return a.w*a.h + b.w*b.h - inter;
}
// Intersection-over-union of two boxes.
// NOTE(review): no guard against a zero-area union (two degenerate boxes)
// which would divide by zero — confirm callers never pass such boxes.
float box_iou(box a, box b)
{
    return box_intersection(a, b)/box_union(a, b);
}
// Per-class non-maximum suppression. First compacts zero-objectness entries
// to the tail, then for each class sorts by that class's probability and
// zeroes the probability of any box whose IoU with a higher-scoring box
// exceeds thresh. Suppression is by prob[k] = 0, not by removal.
void do_nms_sort(detection *dets, int total, int classes, float thresh)
{
    int i, j, k;
    k = total-1;
    // Swap dead (objectness == 0) detections to the end; --i re-examines the
    // element just swapped into position i.
    for(i = 0; i <= k; ++i){
        if(dets[i].objectness == 0){
            detection swap = dets[i];
            dets[i] = dets[k];
            dets[k] = swap;
            --k;
            --i;
        }
    }
    total = k+1;   // only live detections remain in [0, total)
    for(k = 0; k < classes; ++k){
        // sort_class tells nms_comparator which class's prob to sort by.
        for(i = 0; i < total; ++i){
            dets[i].sort_class = k;
        }
        qsort(dets, total, sizeof(detection), nms_comparator);
        for(i = 0; i < total; ++i){
            if(dets[i].prob[k] == 0) continue;
            box a = dets[i].bbox;
            for(j = i+1; j < total; ++j){
                box b = dets[j].bbox;
                if (box_iou(a, b) > thresh){
                    dets[j].prob[k] = 0;
                }
            }
        }
    }
}
//////////////////////box end
//////////////////////image begin
// 6-entry color ramp used for class colors (RGB triples in [0,1]).
float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };
// Map x in [0,max] onto the ramp and linearly interpolate channel c between
// the two nearest ramp entries.
float get_color(int c, int x, int max)
{
    float pos = ((float)x/max)*5;
    int lo = floor(pos);
    int hi = ceil(pos);
    float frac = pos - lo;
    float r = (1-frac)*colors[lo][c] + frac*colors[hi][c];
    //printf("%f\n", r);
    return r;
}
// Bounds-checked pixel read: any access outside the image (spatially or by
// channel) returns 0 instead of touching memory.
static float get_pixel_extend(image m, int x, int y, int c)
{
    int inside = (x >= 0 && x < m.w) && (y >= 0 && y < m.h) && (c >= 0 && c < m.c);
    if(!inside) return 0;
    return get_pixel(m, x, y, c);
}
// Multiply `source` into `dest` at offset (dx,dy): each destination pixel is
// replaced by source*dest (reads outside dest are treated as 0).
void composite_image(image source, image dest, int dx, int dy)
{
    int ch, row, col;
    for(ch = 0; ch < source.c; ++ch){
        for(row = 0; row < source.h; ++row){
            for(col = 0; col < source.w; ++col){
                float s = get_pixel(source, col, row, ch);
                float d = get_pixel_extend(dest, dx+col, dy+row, ch);
                set_pixel(dest, dx+col, dy+row, ch, s * d);
            }
        }
    }
}
// Return a copy of `a` padded by `border` pixels on every side; padding
// pixels are set to 1 (white in this codebase's convention of [0,1] floats).
image border_image(image a, int border)
{
    image out = make_image(a.w + 2*border, a.h + 2*border, a.c);
    int ch, row, col;
    for(ch = 0; ch < out.c; ++ch){
        for(row = 0; row < out.h; ++row){
            for(col = 0; col < out.w; ++col){
                int sx = col - border;
                int sy = row - border;
                float val;
                if(sx < 0 || sx >= a.w || sy < 0 || sy >= a.h){
                    val = 1;
                } else {
                    val = get_pixel_extend(a, sx, sy, ch);
                }
                set_pixel(out, col, row, ch, val);
            }
        }
    }
    return out;
}
// Deep copy: same header fields, freshly allocated pixel buffer.
image copy_image(image p)
{
    image out = p;
    size_t count = (size_t)p.h * p.w * p.c;
    out.data = (float *)calloc(count, sizeof(float));
    memcpy(out.data, p.data, count * sizeof(float));
    return out;
}
// Place `b` to the right of `a` separated by dx pixels on a white canvas.
// An empty `a` (w == 0) simply yields a copy of `b`.
image tile_images(image a, image b, int dx)
{
    if(a.w == 0) return copy_image(b);
    int w = a.w + b.w + dx;
    int h = (a.h > b.h) ? a.h : b.h;
    int c = (a.c > b.c) ? a.c : b.c;
    image canvas = make_image(w, h, c);
    fill_cpu(canvas.w*canvas.h*canvas.c, 1, canvas.data, 1);
    embed_image(a, canvas, 0, 0);
    composite_image(b, canvas, a.w + dx, 0);
    return canvas;
}
// Render `string` as an image by tiling glyphs from the `characters` font
// atlas (indexed [size][ascii]) and adding a border. `size` selects one of 8
// font scales (clamped to 7).
image get_label(image **characters, char *string, int size)
{
    size = size/10;
    if(size > 7) size = 7;
    image label = make_empty_image(0,0,0);
    while(*string){
        image l = characters[size][(int)*string];
        // Negative dx makes consecutive glyphs overlap slightly.
        image n = tile_images(label, l, -size - 1 + (size+1)/2);
        free_image(label);
        label = n;
        ++string;
    }
    // NOTE(review): for an empty string, label stays the 0x0 empty image and
    // border_image pads that — confirm callers never pass "".
    image b = border_image(label, label.h*.25);
    free_image(label);
    return b;
}
// Blit `label` into `a` at row r / column c, tinting channel k by rgb[k].
// When there is room, the label is shifted up to sit above the anchor row.
void draw_label(image a, int r, int c, image label, const float *rgb)
{
    if (r - label.h >= 0) r = r - label.h;
    int row, col, ch;
    for(row = 0; row < label.h && row + r < a.h; ++row){
        for(col = 0; col < label.w && col + c < a.w; ++col){
            for(ch = 0; ch < label.c; ++ch){
                float v = get_pixel(label, col, row, ch);
                set_pixel(a, col+c, row+r, ch, rgb[ch] * v);
            }
        }
    }
}
// Draw a 1-pixel rectangle outline with corners (x1,y1)-(x2,y2) in color
// (r,g,b). Coordinates are clamped to the image bounds first.
// NOTE(review): writes planes 0..2 directly, so this assumes a.c >= 3
// (RGB, channel-planar layout) — confirm no grayscale image reaches here.
void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
    //normalize_image(a);
    int i;
    if(x1 < 0) x1 = 0;
    if(x1 >= a.w) x1 = a.w-1;
    if(x2 < 0) x2 = 0;
    if(x2 >= a.w) x2 = a.w-1;
    if(y1 < 0) y1 = 0;
    if(y1 >= a.h) y1 = a.h-1;
    if(y2 < 0) y2 = 0;
    if(y2 >= a.h) y2 = a.h-1;
    // Horizontal edges (top and bottom rows).
    for(i = x1; i <= x2; ++i){
        a.data[i + y1*a.w + 0*a.w*a.h] = r;
        a.data[i + y2*a.w + 0*a.w*a.h] = r;
        a.data[i + y1*a.w + 1*a.w*a.h] = g;
        a.data[i + y2*a.w + 1*a.w*a.h] = g;
        a.data[i + y1*a.w + 2*a.w*a.h] = b;
        a.data[i + y2*a.w + 2*a.w*a.h] = b;
    }
    // Vertical edges (left and right columns).
    for(i = y1; i <= y2; ++i){
        a.data[x1 + i*a.w + 0*a.w*a.h] = r;
        a.data[x2 + i*a.w + 0*a.w*a.h] = r;
        a.data[x1 + i*a.w + 1*a.w*a.h] = g;
        a.data[x2 + i*a.w + 1*a.w*a.h] = g;
        a.data[x1 + i*a.w + 2*a.w*a.h] = b;
        a.data[x2 + i*a.w + 2*a.w*a.h] = b;
    }
}
// Fake a w-pixel-thick rectangle outline by drawing w nested 1-pixel boxes.
void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
    int t;
    for(t = 0; t < w; ++t){
        draw_box(a, x1+t, y1+t, x2-t, y2-t, r, g, b);
    }
}
// Wrap an existing float buffer in an image header. No copy is made: the
// caller retains ownership of `data` and must keep it alive.
image float_to_image(int w, int h, int c, float *data)
{
    image out = make_empty_image(w,h,c);
    out.data = data;
    return out;
}
// Binarize an image: 1 where the pixel exceeds thresh, else 0.
image threshold_image(image im, float thresh)
{
    image out = make_image(im.w, im.h, im.c);
    int n = im.w*im.h*im.c;
    int k;
    for(k = 0; k < n; ++k){
        out.data[k] = (im.data[k] > thresh) ? 1 : 0;
    }
    return out;
}
// Draw every detection above `thresh` onto `im`: a colored box, an optional
// text label (when a font `alphabet` is supplied), and an optional instance
// mask. Also prints each matched class and confidence to stdout.
void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
{
    int i,j;
    for(i = 0; i < num; ++i){
        // Build a comma-separated list of all classes above threshold.
        // NOTE(review): strcat into a fixed 4096 buffer is unchecked —
        // confirm class-name lengths make overflow impossible in practice.
        char labelstr[4096] = {0};
        int class_t = -1;
        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j] > thresh){
                if (class_t < 0) {
                    strcat(labelstr, names[j]);
                    class_t = j;
                } else {
                    strcat(labelstr, ", ");
                    strcat(labelstr, names[j]);
                }
                printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
            }
        }
        if(class_t >= 0){
            int width = im.h * .006;    // box line width scales with image size
            //printf("%d %s: %.0f%%\n", i, names[class], prob*100);
            // Hash the class id into a stable, well-spread ramp offset.
            int offset = class_t*123457 % classes;
            float red = get_color(2,offset,classes);
            float green = get_color(1,offset,classes);
            float blue = get_color(0,offset,classes);
            float rgb[3];
            //width = prob*20+2;
            rgb[0] = red;
            rgb[1] = green;
            rgb[2] = blue;
            box b = dets[i].bbox;
            //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
            // Convert normalized center/size box to clamped pixel corners.
            int left = (b.x-b.w/2.)*im.w;
            int right = (b.x+b.w/2.)*im.w;
            int top = (b.y-b.h/2.)*im.h;
            int bot = (b.y+b.h/2.)*im.h;
            if(left < 0) left = 0;
            if(right > im.w-1) right = im.w-1;
            if(top < 0) top = 0;
            if(bot > im.h-1) bot = im.h-1;
            draw_box_width(im, left, top, right, bot, width, red, green, blue);
            if (alphabet) {
                image label = get_label(alphabet, labelstr, (im.h*.03));
                draw_label(im, top + width, left, label, rgb);
                free_image(label);
            }
            if (dets[i].mask){
                // Masks are stored at a fixed 14x14 resolution, then resized
                // to the box extent and thresholded at 0.5.
                image mask = float_to_image(14, 14, 1, dets[i].mask);
                image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
                image tmask = threshold_image(resized_mask, .5);
                embed_image(tmask, im, left, top);
                free_image(mask);
                free_image(resized_mask);
                free_image(tmask);
            }
        }
    }
}
//////////////////////image end
//////////////////////////HLS begin
// FPGA/HLS inference driver for YOLOv2. Instead of forward_network(), this
// streams pre-quantized (Q-format, 16-bit packed two-per-int) weights and
// biases from .bin files and dispatches every layer to the YOLO2_FPGA()
// accelerator call with hand-computed tile sizes, then unpacks the final
// fixed-point output and runs the region layer on the CPU.
// NOTE(review): the tiling constants (Tn/Tm/Tr/Tc, buffer sizes, the
// weight/beta offset tables) are hard-wired to one specific YOLOv2 model —
// this function is not general over network configurations.
void yolov2_hls_ps(network *net, float *input)
{
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
#define S 2
#define K 3
#define Tn 2
#define Tm 32
#define Tr 26
#define Tc 26
#define OnChipIB_Width ((Tc-1)*S+K)
#define OnChipIB_Height ((Tr-1)*S+K)
#define MAX_BETA_LENGTH (1024)
#define INTER_WIDTH (19)
    int x;
    network orig = *net;
    net->input = input;
    // Per-conv-layer sizes (in packed shorts) of the weight and bias blobs,
    // in layer order; zeros pad the tables to 32 entries.
    int weight_offset[32] = {864, 18432, 73728, 8192, 73728,
        294912, 32768, 294912, 1179648, 131072, 1179648, 131072,
        1179648, 4718592, 524288, 4718592, 524288, 4718592, 9437184,
        9437184, 32768, 11796480, 435200, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int beta_offset[32] = {32, 64, 128, 64, 128, 256, 128, 256, 512, 256, 512, 256, 512, 1024,
        512, 1024, 512, 1024, 1024, 1024, 64, 1024, 425, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int offset_index = 0;
    // Load all quantized weights/biases up front (two 16-bit values per int).
    int *Weight_buf = (int *)calloc(203767168/4/2,sizeof(int));
    FILE *fp_w = fopen("yolov2_w_reorg_bn_ap16_short16.bin", "rb");
    if(!fp_w) file_error("yolov2_w_reorg_bn_ap16_short16.bin");
    fread(Weight_buf, sizeof(int), 203767168/4/2, fp_w);
    fclose(fp_w);
    int *Beta_buf = (int *)calloc((43044/4 + 1)/2,sizeof(int));
    FILE *fp_b = fopen("yolov2_b_ap16_short16.bin", "rb");
    if(!fp_b) file_error("yolov2_b_ap16_short16.bin");
    fread(Beta_buf, sizeof(int), (43044+4)/4/2, fp_b);
    fclose(fp_b);
    // One shared scratch region used as a ping-pong activation buffer:
    // even layers read from the top and write near the bottom, odd layers
    // the reverse (see the in_ptr/out_ptr setup below).
#define MEM_LEN (416*416*32/2+208*208*32/2)
    int *Memory_buf = (int*)calloc(MEM_LEN+1024+1024,sizeof(int));
    int *Memory_top = Memory_buf+1024;
    int *Memory_bottom = Memory_top + MEM_LEN;
    float *region_buf = (float *)calloc(13*13*432,sizeof(float));
    if(!region_buf) printf("region_buf calloc fail\n");
    // Quantize the float input to Q14 shorts, packing two per int.
    int tmp_in;
    short current_in,next_in;
    bool NextPixelInFlag = true;
    int InputPixelOffset = 0;
    int *Input_ptr = (int *)Memory_top;
    for(x=0;x<416*416*3;x++)//1st Layer input Q14
    {
        if(NextPixelInFlag)
        {
            current_in = (short)(input[x]*pow(2.0,14));
            NextPixelInFlag = false;
        }
        else
        {
            next_in = (short)(input[x]*pow(2.0,14));
            tmp_in = (next_in<<16) + (current_in);
            Input_ptr[InputPixelOffset] = tmp_in;
            InputPixelOffset++;
            NextPixelInFlag = true;
        }
    }
    // Per-layer fixed-point scale factors (Q positions) for inputs, weights
    // and biases, produced offline by the quantization tooling.
    int inputQ[24];
    int weightQ[23];
    int betaQ[23];
    FILE *Qin;
    Qin = fopen("yolov2_bn_input_maxQ_24.bin","rb");
    if(!Qin) file_error("Qin error 1\n");
    fread(inputQ,sizeof(int),24,Qin);
    fclose(Qin);
    inputQ[20] = 10;//route 16 && route 27 24
    for(x=0;x<24;x++)
        printf("[%2d inputQ]=%2d\n",x,inputQ[x]);
    Qin = fopen("yolov2_w_reorg_bn_ap16_maxQ_23.bin","rb");
    if(!Qin) file_error("Qin error 2\n");
    fread(weightQ,sizeof(int),23,Qin);
    fclose(Qin);
    for(x=0;x<23;x++)
        printf("[%2d weightQ]=%2d\n",x,weightQ[x]);
    Qin = fopen("yolov2_b_ap16_maxQ_23.bin","rb");
    if(!Qin) file_error("Qin error 4\n");
    fread(betaQ,sizeof(int),23,Qin);
    fclose(Qin);
    for(x=0;x<23;x++)
        printf("[%2d betaQ]=%2d\n",x,betaQ[x]);
    // Ping-pong buffer assignment per layer, with fixed slots reserved for
    // the route-16 skip connection and the conv-24/conv-27 concatenation.
    int* in_ptr[32];
    int* out_ptr[32];
#define ROUTE16_LEN (26*26*512/2)
#define CONV27_LEN (13*13*256/2)
#define CONV24_LEN (13*13*1024/2)
    for(x=0;x<18;x++)
    {
        if(x%2==0)
        {
            in_ptr[x] = Memory_top;
            out_ptr[x] = Memory_bottom - net->layers[x].outputs/2 ;
        }
        else
        {
            in_ptr[x] = out_ptr[x-1];
            out_ptr[x] = Memory_top;
        }
    }
    for(x=18;x<25;x++)
    {
        if(x%2==0)
        {
            in_ptr[x] = Memory_top;
            out_ptr[x] = Memory_bottom - ROUTE16_LEN - net->layers[x].outputs/2;
        }else
        {
            in_ptr[x] = out_ptr[x-1];
            out_ptr[x] = Memory_top;
        }
    }
    in_ptr[26] = Memory_bottom - ROUTE16_LEN;
    out_ptr[26] = Memory_top;
    in_ptr[27] = Memory_top;
    out_ptr[27] = Memory_bottom - ROUTE16_LEN - CONV24_LEN - CONV27_LEN;
    in_ptr[29] = out_ptr[27];
    out_ptr[29] = Memory_top;
    in_ptr[30] = Memory_top;
    out_ptr[30] = Memory_bottom - (net->layers[30].outputs + 13*13*7)/2;
    in_ptr[31] = out_ptr[30];
    network netp = *net;
    int i;
    int woffset = 0;
    int boffset = 0;
    int TR,TC,TM,TN;
    int output_w,output_h;
    int rLoops,cLoops,mLoops,nLoops;
    double sum_gop = 0.0;
    float offchip_factor = 1.5;
    // Main layer loop: compute tile sizes and dispatch each layer to the
    // accelerator; ROUTE layers are pure pointer bookkeeping (handled above).
    for(i = 0; i < netp.n; ++i)
    {
        netp.index = i;
        layer l = netp.layers[i];
        printf("Layer[%2d]: ",i);
        switch(l.type)
        {
            case CONVOLUTIONAL:
                printf("outputMemory:%8d;BN=%d;Activation=%d;conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n",l.outputs,l.batch_normalize,l.activation, l.n, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
                sum_gop += (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.;
                output_w = (l.w - l.size + 2*l.pad)/l.stride + 1 ;
                output_h = (l.h - l.size + 2*l.pad)/l.stride + 1 ;
                // Clamp tile dims to both the on-chip buffer and the layer size.
                TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
                TR = MIN(output_h,TR);
                TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
                TC = MIN(output_w,TC);
                TM = MIN(l.n,Tm);
                TN = MIN(l.c,Tn);
                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = (int)ceil(((float)l.n)/TM);
                nLoops = (int)ceil(((float)l.c)/TN);
                //printf("OW=%d,OH=%d,rLoops=%d,cLoops=%d,mLoops=%d,nLoops=%d\n",output_w,output_h,rLoops,cLoops,mLoops,nLoops);
                YOLO2_FPGA(in_ptr[i],out_ptr[i],Weight_buf+woffset/2,Beta_buf+boffset/2,
                    l.c,l.n,l.size,
                    l.stride,l.w,l.h,l.pad,l.activation==LEAKY?1:0,l.batch_normalize?1:0,
                    TM,TN,TR,TC,
                    mLoops,nLoops,rLoops,cLoops,0,
                    inputQ[offset_index],inputQ[offset_index+1],weightQ[offset_index],betaQ[offset_index]);
                printf("TR=%d,TC=%d,TM=%d,TN=%d,rLoops=%d,cLoops=%d,mLoops=%d,nLoops=%d\n",TR,TC,TM,TN,rLoops,cLoops,mLoops,nLoops);
                woffset += weight_offset[offset_index];
                boffset += beta_offset[offset_index];
                offset_index++;
                break;
            case MAXPOOL:
                printf("outputMemory:%8d;max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
                output_w = (l.w - l.size)/l.stride + 1 ;
                output_h = (l.h - l.size)/l.stride + 1 ;
                TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
                TR = MIN(output_h,TR);
                TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
                TC = MIN(output_w,TC);
                TM = MIN(Tm,Tn);
                TM = MIN(l.c,TM);
                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = (int)ceil(((float)l.c)/TM);
                // Mode 1 = pooling; maxpool keeps the layer's Q format.
                YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,l.c,l.c,
                    l.size,l.stride,l.w,l.h,0,0,0,TM,0,TR,TC,mLoops,0,rLoops,cLoops,1,
                    inputQ[offset_index],inputQ[offset_index],0,0);
                break;
            case REORG:
                printf("outputMemory:%8d;reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
                // Mode 2 = reorg; dimensions below are the hand-flattened view
                // of the 26x26x64 -> 13x13x256 shuffle used by this model.
                output_w = 26;
                output_h = 32*13;
                TR = MIN(((OnChipIB_Height-l.stride)/l.stride+1),Tr);//keep Kernel_stride>=1
                TR = MIN(output_h,TR);
                TC = MIN(((OnChipIB_Width-l.stride)/l.stride+1),Tc);
                TC = MIN(output_w,TC);
                TM = 4;
                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = 1;
                YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,1,4,
                    l.stride,l.stride,52,32*26,0,0,0,TM,0,TR,TC,mLoops,0,rLoops,cLoops,2,
                    inputQ[offset_index],inputQ[offset_index],0,0);
                break;
            case ROUTE:
                printf("outputMemory:%8d;route ",l.outputs);
                int j;
                for(j = 0; j < l.n; ++j){
                    printf(" %d", l.input_layers[j]);
                }
                printf("\n");
                break;
            case REGION:
                printf("outputMemory:%8d;Detection\n",l.outputs);
                //netp.input = in_ptr[i];
                // Unpack the final layer's packed Q-format shorts back to
                // floats, then run the region layer on the CPU.
                double LastLayerOutputPara = pow(2.0,-inputQ[23]);
                bool NextPixelFlag = true;
                int OutputPixelOffset = 0;
                short current_p,next_p,output_p;
                int *Output_ptr = (int *)(in_ptr[i]);
                for(j=0;j<l.outputs;j++)
                {
                    if(NextPixelFlag)
                    {
                        int tmp_p = Output_ptr[OutputPixelOffset];
                        OutputPixelOffset++;
                        current_p = tmp_p;
                        next_p = tmp_p >> 16;
                        output_p = current_p;
                        NextPixelFlag = false;
                    }else
                    {
                        output_p = next_p;
                        NextPixelFlag = true;
                    }
                    region_buf[j] = output_p*LastLayerOutputPara;
                }
                netp.input = region_buf;
                forward_region_layer(l,netp);
                break;
        }
        netp.input = l.output;
    }
    printf("SUM_GOP=%g\n",sum_gop);
    *net = orig;
    free(region_buf);
    free(Memory_buf);
    free(Weight_buf);
    free(Beta_buf);
}
//////////////////////////HLS end
#endif
|
weighted_sptree_inl.h | /*
*
* Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
/*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef WEIGHTED_SPTREE_INL
#define WEIGHTED_SPTREE_INL
#include <math.h>
#include <float.h>
#include <stdlib.h>
#include <stdio.h>
#include <cmath>
#include "weighted_sptree.h"
#include <math.h>
#include <algorithm>
namespace hdi{
namespace dr{
//! Constructs cell
template <typename scalar_type>
WeightedSPTree<scalar_type>::Cell::Cell(unsigned int emb_dimension) {
    // Allocate (uninitialized) corner/width arrays sized by the embedding
    // dimensionality; the caller is expected to set them via setCorner/setWidth.
    _emb_dimension = emb_dimension;
    corner = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type));
    width = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type));
}
template <typename scalar_type>
WeightedSPTree<scalar_type>::Cell::Cell(unsigned int emb_dimension, hp_scalar_type* inp_corner, hp_scalar_type* inp_width) {
    // Allocate storage, then copy the supplied corner and width vectors.
    _emb_dimension = emb_dimension;
    corner = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type));
    width = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type));
    for(unsigned int d = 0; d < _emb_dimension; d++) {
        setCorner(d, inp_corner[d]);
        setWidth(d, inp_width[d]);
    }
}
//! Destructs cell
template <typename scalar_type>
WeightedSPTree<scalar_type>::Cell::~Cell() {
    // Release the malloc'd arrays allocated by the constructors.
    free(corner);
    free(width);
}
// Corner (center) coordinate of the cell along dimension d.
template <typename scalar_type>
typename WeightedSPTree<scalar_type>::hp_scalar_type WeightedSPTree<scalar_type>::Cell::getCorner(unsigned int d) {
    return corner[d];
}
// Half-width of the cell along dimension d.
template <typename scalar_type>
typename WeightedSPTree<scalar_type>::hp_scalar_type WeightedSPTree<scalar_type>::Cell::getWidth(unsigned int d) {
    return width[d];
}
// Set the corner (center) coordinate along dimension d.
template <typename scalar_type>
void WeightedSPTree<scalar_type>::Cell::setCorner(unsigned int d, hp_scalar_type val) {
    corner[d] = val;
}
// Set the half-width along dimension d.
template <typename scalar_type>
void WeightedSPTree<scalar_type>::Cell::setWidth(unsigned int d, hp_scalar_type val) {
    width[d] = val;
}
// Checks whether a point lies in a cell
// True when `point` lies within the cell (corner +/- width on every axis).
template <typename scalar_type>
bool WeightedSPTree<scalar_type>::Cell::containsPoint(scalar_type point[])
{
    for(unsigned int d = 0; d < _emb_dimension; d++) {
        if(point[d] < corner[d] - width[d] || point[d] > corner[d] + width[d]) {
            return false;
        }
    }
    return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////
//! Default constructor for WeightedSPTree -- build tree, too!
template <typename scalar_type>
WeightedSPTree<scalar_type>::WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N){
    // Compute mean, width, and height of current map (boundaries of WeightedSPTree)
    hp_scalar_type* mean_Y = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type)); for(unsigned int d = 0; d < D; d++) mean_Y[d] = .0;
    hp_scalar_type* min_Y = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type)); for(unsigned int d = 0; d < D; d++) min_Y[d] = DBL_MAX;
    hp_scalar_type* max_Y = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type)); for(unsigned int d = 0; d < D; d++) max_Y[d] = -DBL_MAX;
    // Single pass over the N points: accumulate per-dimension mean, min, max.
    for(unsigned int n = 0; n < N; n++) {
        for(unsigned int d = 0; d < D; d++) {
            mean_Y[d] += inp_data[n * D + d];
            if(inp_data[n * D + d] < min_Y[d]) min_Y[d] = inp_data[n * D + d];
            if(inp_data[n * D + d] > max_Y[d]) max_Y[d] = inp_data[n * D + d];
        }
    }
    for(int d = 0; d < D; d++) mean_Y[d] /= (hp_scalar_type) N;
    // Construct WeightedSPTree: root cell is centered at the mean, wide enough
    // to contain the farthest point on each axis (plus a small epsilon).
    hp_scalar_type* width = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type));
    for(int d = 0; d < D; d++)
        //width[d] = fmax(max_Y[d] - mean_Y[d], mean_Y[d] - min_Y[d]) + 1e-5; //C++11
        width[d] = std::max(max_Y[d] - mean_Y[d], mean_Y[d] - min_Y[d]) + 1e-5;
    init(NULL, D, inp_data, weights, mean_Y, width);
    fill(N);
    // Clean up memory
    free(mean_Y);
    free(max_Y);
    free(min_Y);
    free(width);
}
//! Constructor with explicit bounds (no parent) -- builds the tree too.
template <typename scalar_type>
WeightedSPTree<scalar_type>::WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){
    init(NULL, D, inp_data, weights, inp_corner, inp_width);
    fill(N);
}
//! Constructor with explicit bounds (no parent); does NOT insert any points.
template <typename scalar_type>
WeightedSPTree<scalar_type>::WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){
    init(NULL, D, inp_data, weights, inp_corner, inp_width);
}
//! Constructor for a child node with explicit bounds; does NOT insert points.
template <typename scalar_type>
WeightedSPTree<scalar_type>::WeightedSPTree(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){
    init(inp_parent, D, inp_data, weights, inp_corner, inp_width);
}
//! Constructor for a child node with explicit bounds -- builds the tree too.
template <typename scalar_type>
WeightedSPTree<scalar_type>::WeightedSPTree(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){
    init(inp_parent, D, inp_data, weights, inp_corner, inp_width);
    fill(N);
}
//! Main initialization function: set up an empty node over the cell described
//! by inp_corner/inp_width. Points, weights and children are added later.
template <typename scalar_type>
void WeightedSPTree<scalar_type>::init(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){
    parent = inp_parent;
    _emb_dimension = D;
    // A D-dimensional cell splits into 2^D children.
    no_children = 2;
    for(unsigned int d = 1; d < D; d++)
        no_children *= 2;
    _emb_positions = inp_data;
    _weights = weights;
    is_leaf = true;
    size = 0;
    cum_size = 0;
    // Describe this node's cell: a corner plus a per-dimension width.
    boundary = new Cell(_emb_dimension);
    for(unsigned int d = 0; d < D; d++){
        boundary->setCorner(d, inp_corner[d]);
        boundary->setWidth(d, inp_width[d]);
    }
    // Children are created lazily by subdivide(); start them all at NULL.
    children = (WeightedSPTree**) malloc(no_children * sizeof(WeightedSPTree*));
    for(unsigned int c = 0; c < no_children; c++)
        children[c] = NULL;
    // Center of mass accumulates as points are inserted.
    _center_of_mass = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type));
    for(unsigned int d = 0; d < D; d++)
        _center_of_mass[d] = .0;
}
// Destructor for WeightedSPTree: recursively releases the whole subtree.
template <typename scalar_type>
WeightedSPTree<scalar_type>::~WeightedSPTree()
{
    // Children that were never created are NULL; `delete NULL` is a no-op,
    // so no explicit check is required.
    for(unsigned int c = 0; c < no_children; c++)
        delete children[c];
    free(children);
    free(_center_of_mass);
    delete boundary;
}
// Update the _emb_positions (and weights) underlying this tree.
// NOTE(review): the previous version accepted `weights` but never stored it,
// leaving _weights pointing at the array passed to the constructor. Both
// pointers are now kept in sync; confirm no caller relied on the old behavior.
template <typename scalar_type>
void WeightedSPTree<scalar_type>::setData(scalar_type* inp_data, const scalar_type* weights)
{
    _emb_positions = inp_data;
    _weights = weights;
}
// Get the parent of the current tree (NULL for the root node).
template <typename scalar_type>
WeightedSPTree<scalar_type>* WeightedSPTree<scalar_type>::getParent()
{
    return parent;
}
// Insert a point into the WeightedSPTree.
//   new_index: index of the point in _emb_positions/_weights.
// Returns true if the point lies inside this node's cell (and was therefore
// accounted for), false otherwise. The node's cumulative weight and
// weighted center-of-mass are updated even when the point is ultimately
// stored in a descendant.
template <typename scalar_type>
bool WeightedSPTree<scalar_type>::insert(unsigned int new_index)
{
    //#pragma critical
    {
        // Ignore objects which do not belong in this quad tree
        scalar_type* point = _emb_positions + new_index * _emb_dimension;
        if(!boundary->containsPoint(point))
            return false;
        // Online update of cumulative size and center-of-mass.
        // The unweighted variant (commented out below) used counts; here the
        // point contributes its weight, so the running mean is
        //   com = com * (old_cum / new_cum) + point * (w / new_cum)
        //cum_size++;
        //hp_scalar_type mult1 = (hp_scalar_type) (cum_size - 1) / (hp_scalar_type) cum_size;
        //hp_scalar_type mult2 = 1.0 / (hp_scalar_type) cum_size;
        cum_size += _weights[new_index];
        hp_scalar_type mult1 = (hp_scalar_type) (cum_size - _weights[new_index]) / (hp_scalar_type) cum_size;
        hp_scalar_type mult2 = _weights[new_index] / (hp_scalar_type) cum_size;
        for(unsigned int d = 0; d < _emb_dimension; d++){
            _center_of_mass[d] *= mult1;
        }
        for(unsigned int d = 0; d < _emb_dimension; d++){
            _center_of_mass[d] += mult2 * point[d];
        }
        // If there is space in this quad tree and it is a leaf, add the object here
        if(is_leaf && size < QT_NODE_CAPACITY) {
            index[size] = new_index;
            size++;
            return true;
        }
        // Don't add duplicates for now (this is not very nice): a point equal
        // to one already stored here is counted (above) but not stored, which
        // avoids infinite subdivision on coincident points.
        bool any_duplicate = false;
        for(unsigned int n = 0; n < size; n++) {
            bool duplicate = true;
            for(unsigned int d = 0; d < _emb_dimension; d++) {
                if(point[d] != _emb_positions[index[n] * _emb_dimension + d]) { duplicate = false; break; }
            }
            any_duplicate = any_duplicate | duplicate;
        }
        if(any_duplicate) return true;
        // Otherwise, we need to subdivide the current cell
        if(is_leaf){
            subdivide();
        }
        // Find out where the point can be inserted
        for(unsigned int i = 0; i < no_children; i++) {
            if(children[i]->insert(new_index)) return true;
        }
        // Otherwise, the point cannot be inserted (this should never happen)
        return false;
    }
}
// Split this leaf into 2^D equal half-width child cells and move the points
// stored here down into them.
template <typename scalar_type>
void WeightedSPTree<scalar_type>::subdivide() {
    // Scratch buffers describing each child's cell, reused across children.
    hp_scalar_type* child_corner = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type));
    hp_scalar_type* child_width  = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type));
    for(unsigned int c = 0; c < no_children; c++) {
        unsigned int stride = 1;
        for(unsigned int d = 0; d < _emb_dimension; d++) {
            child_width[d] = .5 * boundary->getWidth(d);
            // The d-th base-2 digit of the child number selects which half of
            // dimension d this child occupies.
            if((c / stride) % 2 == 1)
                child_corner[d] = boundary->getCorner(d) - .5 * boundary->getWidth(d);
            else
                child_corner[d] = boundary->getCorner(d) + .5 * boundary->getWidth(d);
            stride *= 2;
        }
        children[c] = new WeightedSPTree(this, _emb_dimension, _emb_positions, _weights, child_corner, child_width);
    }
    free(child_corner);
    free(child_width);
    // Re-insert the points held by this node into whichever child contains them.
    for(unsigned int n = 0; n < size; n++) {
        bool placed = false;
        for(unsigned int c = 0; c < no_children; c++) {
            if(!placed)
                placed = children[c]->insert(index[n]);
        }
        index[n] = -1;
    }
    // This node no longer stores points of its own.
    size = 0;
    is_leaf = false;
}
// Build WeightedSPTree on dataset: insert the first N points of _emb_positions.
template <typename scalar_type>
void WeightedSPTree<scalar_type>::fill(unsigned int N)
{
    // Unsigned counter matches N's type; the previous `int i` produced a
    // signed/unsigned comparison in the loop condition.
    //#pragma omp parallel for
    for(unsigned int i = 0; i < N; i++)
        insert(i);
}
// Checks whether the specified tree is correct: every stored point must lie
// inside its node's cell, recursively. Returns false on the first violation.
template <typename scalar_type>
bool WeightedSPTree<scalar_type>::isCorrect()
{
    for(unsigned int n = 0; n < size; n++) {
        scalar_type* point = _emb_positions + index[n] * _emb_dimension;
        if(!boundary->containsPoint(point)) return false;
    }
    if(!is_leaf) {
        // Unsigned counter (no_children is unsigned); bail out on the first
        // incorrect subtree -- the original `correct && ...` already
        // short-circuited the recursive calls, so behavior is unchanged.
        for(unsigned int i = 0; i < no_children; i++)
            if(!children[i]->isCorrect()) return false;
    }
    return true;
}
// Build a list of all indices in WeightedSPTree.
// `indices` must be large enough to hold every stored (non-duplicate) point.
template <typename scalar_type>
void WeightedSPTree<scalar_type>::getAllIndices(unsigned int* indices)
{
    getAllIndices(indices, 0);
}
// Build a list of all indices in WeightedSPTree, writing into `indices`
// starting at offset `loc`; returns the offset one past the last write.
template <typename scalar_type>
unsigned int WeightedSPTree<scalar_type>::getAllIndices(unsigned int* indices, unsigned int loc)
{
    // Gather indices in current quadrant
    for(unsigned int i = 0; i < size; i++) indices[loc + i] = index[i];
    loc += size;
    // Gather indices in children; unsigned counter matches no_children's
    // type (the previous `int i` triggered a signed/unsigned comparison).
    if(!is_leaf) {
        for(unsigned int i = 0; i < no_children; i++) loc = children[i]->getAllIndices(indices, loc);
    }
    return loc;
}
// Depth of the subtree rooted at this node; a leaf counts as depth 1.
template <typename scalar_type>
unsigned int WeightedSPTree<scalar_type>::getDepth() {
    if(is_leaf) return 1;
    unsigned int depth = 0;
    for(unsigned int i = 0; i < no_children; i++)
        //depth = fmax(depth, children[i]->getDepth()); //C++11
        depth = std::max(depth, children[i]->getDepth());
    return 1 + depth;
}
// Compute non-edge (repulsive) forces using Barnes-Hut algorithm.
//   point_index: point for which the force is accumulated
//   theta:       accuracy/speed trade-off; a node is used as a summary when
//                (max cell width / distance) < theta
//   neg_f:       accumulates the weighted repulsive force vector
//   sum_Q:       accumulates the normalization term of the gradient
template <typename scalar_type>
void WeightedSPTree<scalar_type>::computeNonEdgeForces(unsigned int point_index, hp_scalar_type theta, hp_scalar_type neg_f[], hp_scalar_type& sum_Q)const
{
    std::vector<hp_scalar_type> distance(_emb_dimension,0);
    // Make sure that we spend no time on empty nodes or self-interactions
    if(cum_size == 0 || (is_leaf && size == 1 && index[0] == point_index)){
        return;
    }
    // Compute distance between point and center-of-mass
    hp_scalar_type distance_squared = .0;
    unsigned int ind = point_index * _emb_dimension;
    for(unsigned int d = 0; d < _emb_dimension; d++){
        distance[d] = _emb_positions[ind + d] - _center_of_mass[d];
    }
    for(unsigned int d = 0; d < _emb_dimension; d++){
        distance_squared += distance[d] * distance[d];
    }
    // Check whether we can use this node as a "summary": the cell must be
    // small relative to its distance from the point (Barnes-Hut criterion)
    hp_scalar_type max_width = 0.0;
    hp_scalar_type cur_width;
    for(unsigned int d = 0; d < _emb_dimension; d++) {
        cur_width = boundary->getWidth(d);
        max_width = (max_width > cur_width) ? max_width : cur_width;
    }
    if(is_leaf || (max_width / sqrt(distance_squared) < theta)) {
        // Compute and add t-SNE force between point and current node, using
        // the Student-t kernel 1/(1+||d||^2), scaled by the point's weight
        // and the node's cumulative weight (cum_size)
        hp_scalar_type t_student = 1.0 / (1.0 + distance_squared);
        sum_Q += _weights[point_index] * cum_size * t_student;
        hp_scalar_type q_it_squared = t_student * t_student;
        for(unsigned int d = 0; d < _emb_dimension; d++){
            neg_f[d] += _weights[point_index] * cum_size * q_it_squared * distance[d];
        }
    }else{
        // Recursively apply Barnes-Hut to children
        for(unsigned int i = 0; i < no_children; i++){
            children[i]->computeNonEdgeForces(point_index, theta, neg_f, sum_Q);
        }
    }
}
//! Print out tree (debugging aid); writes a human-readable dump to stdout.
template <typename scalar_type>
void WeightedSPTree<scalar_type>::print()
{
    if(cum_size == 0) {
        printf("Empty node\n");
        return;
    }
    if(is_leaf) {
        // Leaf: list each stored point's coordinates and its index.
        printf("Leaf node; _emb_positions = [");
        for(int i = 0; i < size; i++) {
            scalar_type* point = _emb_positions + index[i] * _emb_dimension;
            for(int d = 0; d < _emb_dimension; d++) printf("%f, ", point[d]);
            printf(" (index = %d)", index[i]);
            if(i < size - 1) printf("\n");
            else printf("]\n");
        }
    }
    else {
        // Interior node: show the center of mass, then recurse.
        printf("Intersection node with center-of-mass = [");
        for(int d = 0; d < _emb_dimension; d++) printf("%f, ", _center_of_mass[d]);
        printf("]; children are:\n");
        for(int i = 0; i < no_children; i++) children[i]->print();
    }
}
}
}
#endif
|
GB_unop__identity_int64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_fc64)
// op(A') function: GB (_unop_tran__identity_int64_fc64)
// C type: int64_t
// A type: GxB_FC64_t
// cast: int64_t cij = GB_cast_to_int64_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = GB_cast_to_int64_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = GB_cast_to_int64_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// NOTE: this file is auto-generated from Generator/* (see the header above);
// only comments are touched here -- edit the generator template instead.

GrB_Info GB (_unop_apply__identity_int64_fc64)
(
    int64_t *Cx,                // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: cast every value
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            // cast: real part of the complex value, converted to int64
            int64_t z = GB_cast_to_int64_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// NOTE: auto-generated kernel; the loop body lives in the shared template
// GB_unop_transpose.c, specialized by the GB_* macros defined above.

GrB_Info GB (_unop_tran__identity_int64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the canvas image, modified by the composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Composition based on the SVG specification:
A Composition is defined by...
       Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color divided by Source alpha
Dca = Dc*Da normalized Dest color divided by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
     Da' appears in the following formulas as 'gamma' -- the resulting alpha value.
Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also define that Mathematical Composition
methods should use a 'Over' blending mode for Alpha Channel.
It however was not applied for composition modes of 'Plus', 'Minus',
the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
     3) When the special channel flag 'sync' (synchronize channel updates)
is turned off (enabled by default) then mathematical compositions are
only performed on the channels specified, and are applied
        independently of each other. In other words the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace: the hue sector picks the two dominant
    channels (c and the intermediate x), then m shifts all channels so the
    weighted sum matches the requested luma.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));
  r=0.0;
  g=0.0;
  b=0.0;
  if ((0.0 <= h) && (h < 6.0))
    switch ((int) h)
    {
      case 0: { r=c; g=x; break; }
      case 1: { r=x; g=c; break; }
      case 2: { g=c; b=x; break; }
      case 3: { g=x; b=c; break; }
      case 4: { r=x; b=c; break; }
      case 5: { r=c; b=x; break; }
      default: break;
    }
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace: chroma is the channel spread, hue encodes
    which channel dominates, and luma is a fixed weighted sum of the channels.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  r=red;
  g=green;
  b=blue;
  max=MagickMax(r,MagickMax(g,b));
  c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));
  h=0.0;
  if (c != 0)
    {
      if (red == max)
        h=fmod((g-b)/c+6.0,6.0);
      else
        if (green == max)
          h=((b-r)/c)+2.0;
        else
          if (blue == max)
            h=((r-g)/c)+4.0;
    }
  *hue=(h/6.0);
  *chroma=QuantumScale*c;
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
static MagickBooleanType CompositeOverImage(Image *image,
  const Image *source_image,const MagickBooleanType clip_to_self,
  const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *image_view,
    *source_view;

  const char
    *value;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Composite source_image over image using Porter-Duff 'over' blending.
    (The previous version initialized status/progress twice; the duplicate
    assignments were removed.)
  */
  status=MagickTrue;
  progress=0;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsStringTrue(value);
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    PixelInfo
      canvas_pixel,
      source_pixel;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(Quantum *) NULL;
    p=(Quantum *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        if (x_offset < 0)
          p-=x_offset*(ssize_t) GetPixelChannels(source_image);
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&canvas_pixel);
    GetPixelInfo(source_image,&source_pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      MagickRealType
        alpha,
        Da,
        Dc,
        Dca,
        Sa,
        Sc,
        Sca;

      ssize_t
        i;

      size_t
        channels;

      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          Quantum
            source[MaxPixelChannels];

          /*
            Virtual composite:
              Sc: source color.
              Dc: canvas color.
          */
          (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
            exception);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            MagickRealType
              pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait source_traits=GetPixelChannelTraits(source_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (source_traits == UndefinedPixelTrait))
              continue;
            /* outside the overlay, the canvas shows through transparently */
            if (channel == AlphaPixelChannel)
              pixel=(MagickRealType) TransparentAlpha;
            else
              pixel=(MagickRealType) q[i];
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
          }
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Authentic composite:
          Sa: normalized source alpha.
          Da: normalized canvas alpha.
      */
      Sa=QuantumScale*GetPixelAlpha(source_image,p);
      Da=QuantumScale*GetPixelAlpha(image,q);
      alpha=Sa+Da-Sa*Da;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        MagickRealType
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((source_traits == UndefinedPixelTrait) &&
            (channel != AlphaPixelChannel))
          continue;
        if (channel == AlphaPixelChannel)
          {
            /*
              Set alpha channel.
            */
            pixel=QuantumRange*alpha;
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
            continue;
          }
        /*
          Sc: source color.
          Dc: canvas color.
        */
        Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
        Dc=(MagickRealType) q[i];
        if ((traits & CopyPixelTrait) != 0)
          {
            /*
              Copy channel.
            */
            q[i]=ClampToQuantum(Sc);
            continue;
          }
        /*
          Porter-Duff compositions:
            Sca: source normalized color multiplied by alpha.
            Dca: normalized canvas color multiplied by alpha.
        */
        Sca=QuantumScale*Sa*Sc;
        Dca=QuantumScale*Da*Dc;
        gamma=PerceptibleReciprocal(alpha);
        pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
        q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(source_image);
      channels=GetPixelChannels(source_image);
      /* wrap p back to the row start once the overlay row is exhausted */
      if (p >= (pixels+channels*source_image->columns))
        p=pixels;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static Image *SeamlessAddImage(const Image *image,const Image *source_image,
  const double sign,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *image_view,
    *add_view;

  Image
    *add_image;

  ssize_t
    y;

  /*
    Add (sign=+1.0) or subtract (sign=-1.0) the source image from the image,
    channel by channel, writing the clamped result into a clone of `image`.
    Returns NULL on failure.
  */
  add_image=CloneImage(image,0,0,MagickTrue,exception);
  if (add_image == (Image *) NULL)
    return(add_image);
  image_view=AcquireVirtualCacheView(image,exception);
  source_view=AcquireVirtualCacheView(source_image,exception);
  add_view=AcquireVirtualCacheView(add_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    Quantum
      *magick_restrict r;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(source_view,0,y,image->columns,1,exception);
    r=GetCacheViewAuthenticPixels(add_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_image_traits = GetPixelChannelTraits(source_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (source_image_traits == UndefinedPixelTrait) ||
            ((source_image_traits & UpdatePixelTrait) == 0))
          continue;
        /* r = p + sign*q, clamped to the quantum range */
        r[i]=ClampToQuantum(p[i]+sign*GetPixelChannel(source_image,channel,q));
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(source_image);
      r+=GetPixelChannels(add_image);
    }
    if (x < (ssize_t) image->columns)
      break;
    if (SyncCacheViewAuthenticPixels(add_view,exception) == MagickFalse)
      break;
  }
  add_view=DestroyCacheView(add_view);
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  /* an early break above signals failure; destroy the partial result */
  if (y < (ssize_t) image->rows)
    add_image=DestroyImage(add_image);
  return(add_image);
}
static Image *SeamlessMeanImage(Image *image,const Image *source_image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *source_view;

  double
    mean[MaxPixelChannels];

  Image
    *mean_image;

  ssize_t
    j,
    y;

  /*
    Compute the mean of the image.  Accumulate in double precision: the
    previous Quantum accumulator truncated the fractional QuantumScale*p[i]
    terms to zero on integer-quantum builds.
  */
  (void) memset(mean,0,MaxPixelChannels*sizeof(*mean));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        mean[i]+=QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
    if (x < (ssize_t) image->columns)
      break;
  }
  if (y < (ssize_t) image->rows)
    {
      /* release the view on the early-exit path (previously leaked) */
      image_view=DestroyCacheView(image_view);
      return((Image *) NULL);
    }
  for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
    mean[j]=ClampToQuantum((double) QuantumRange*mean[j]/image->columns/
      image->rows);
  /*
    Replace any masked pixels with the mean pixel.
  */
  image_view=DestroyCacheView(image_view);
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return(mean_image);
  image_view=AcquireAuthenticCacheView(mean_image,exception);
  source_view=AcquireVirtualCacheView(source_image,exception);
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(source_view,0,y,mean_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      double
        alpha = (double) GetPixelAlpha(source_image,p);

      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(mean_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(mean_image,i);
        PixelTrait traits = GetPixelChannelTraits(mean_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        /* NOTE(review): non-transparent source pixels receive the mean and
           fully-opaque ones are then zeroed -- confirm against caller
           intent in SeamlessBlendImage */
        if (fabs(alpha) >= MagickEpsilon)
          q[i]=ClampToQuantum(mean[i]);
        if (fabs(alpha-QuantumRange) < MagickEpsilon)
          q[i]=(Quantum) 0;
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(mean_image);
    }
    if (x < (ssize_t) mean_image->columns)
      break;
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  if (y < (ssize_t) image->rows)
    mean_image=DestroyImage(mean_image);
  return(mean_image);
}
static MagickBooleanType SeamlessRMSEResidual(const Image *image,
  const Image *source_image,double *residual,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *source_view;

  double
    area;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Root-mean-squared error between the two images over their unmasked
    pixels; color channels are alpha-weighted before differencing.  The
    result is stored in *residual; returns MagickFalse on a pixel-cache
    failure.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,source_image->rows);
  columns=MagickMax(image->columns,source_image->columns);
  area=0.0;
  *residual=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  source_view=AcquireVirtualCacheView(source_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_residual;

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    size_t
      local_area = 0;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(source_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    channel_residual=0.0;
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* skip pixels excluded by either image's read mask */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(source_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(source_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(source_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits = GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (source_traits == UndefinedPixelTrait) ||
            ((source_traits & UpdatePixelTrait) == 0))
          continue;
        /* alpha compared directly; color channels are alpha-premultiplied */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(source_image,channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(source_image,
            channel,q));
        channel_residual+=distance*distance;
      }
      local_area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(source_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_SeamlessRMSEResidual)
#endif
    {
      /* merge this row's partial sums under the critical section */
      area+=local_area;
      *residual+=channel_residual;
    }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);
  *residual=sqrt(*residual*area/(double) GetImageChannels(image));
  return(status);
}
static MagickBooleanType SeamlessThresholdAlphaChannel(Image *image,
  const Image *source_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *source_view;

  ssize_t
    y;

  /*
    Threshold the alpha channel: a pixel of `image` becomes fully opaque when
    the corresponding source alpha is (near) fully transparent or (near)
    fully opaque, and fully transparent for any intermediate source alpha.
  */
  if (SetImageAlpha(image,OpaqueAlpha,exception) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,exception);
  source_view=AcquireVirtualCacheView(source_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(source_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        alpha = (double) GetPixelAlpha(source_image,p);

      ssize_t
        i = GetPixelChannelOffset(image,AlphaPixelChannel);

      q[i]=(Quantum) 0;
      if ((fabs(alpha) < MagickEpsilon) ||
          (fabs(alpha) > (QuantumRange-MagickEpsilon)))
        q[i]=(Quantum) QuantumRange;
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    if (x < (ssize_t) image->columns)
      break;
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  if (y < (ssize_t) image->rows)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType SeamlessBlendImage(Image *image,
  const Image *source_image,const ssize_t x_offset,const ssize_t y_offset,
  const double iterations,const double residual_threshold,const size_t tick,
  ExceptionInfo *exception)
{
  Image
    *add_image,
    *crop_image,
    *foreground_image,
    *mean_image,
    *relax_image,
    *residual_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status = MagickTrue,
    verbose = MagickFalse;

  RectangleInfo
    crop_info = {
      source_image->columns,
      source_image->rows,
      x_offset,
      y_offset
    };

  ssize_t
    i;

  /*
    Seamless blend composite operator: build the difference between the
    source image and the overlapped region of the background, iteratively
    relax (diffuse) it until the RMSE residual between successive iterations
    drops below residual_threshold (or `iterations' is exhausted), then
    composite the relaxed foreground over the background image at the
    requested offset.
  */
  crop_image=CropImage(image,&crop_info,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  (void) ResetImagePage(crop_image,"0x0+0+0");
  add_image=SeamlessAddImage(crop_image,source_image,-1.0,exception);
  crop_image=DestroyImage(crop_image);
  if (add_image == (Image *) NULL)
    return(MagickFalse);
  mean_image=SeamlessMeanImage(add_image,source_image,exception);
  add_image=DestroyImage(add_image);
  if (mean_image == (Image *) NULL)
    return(MagickFalse);
  relax_image=CloneImage(mean_image,0,0,MagickTrue,exception);
  if (relax_image == (Image *) NULL)
    {
      mean_image=DestroyImage(mean_image);
      return(MagickFalse);
    }
  status=SeamlessThresholdAlphaChannel(mean_image,source_image,exception);
  if (status == MagickFalse)
    {
      relax_image=DestroyImage(relax_image);
      mean_image=DestroyImage(mean_image);
      return(MagickFalse);
    }
  residual_image=CloneImage(relax_image,0,0,MagickTrue,exception);
  if (residual_image == (Image *) NULL)
    {
      relax_image=DestroyImage(relax_image);
      mean_image=DestroyImage(mean_image);
      return(MagickFalse);
    }
  /*
    Convolve relaxed image and blur area of interest.
  */
  kernel_info=AcquireKernelInfo("3x3:0,0.25,0,0.25,0,0.25,0,0.25,0",exception);
  if (kernel_info == (KernelInfo *) NULL)
    {
      residual_image=DestroyImage(residual_image);
      relax_image=DestroyImage(relax_image);
      mean_image=DestroyImage(mean_image);
      return(MagickFalse);
    }
  verbose=IsStringTrue(GetImageArtifact(image,"verbose"));
  if (verbose != MagickFalse)
    (void) FormatLocaleFile(stderr,"seamless blending:\n");
  for (i=0; i < (ssize_t) iterations; i++)
  {
    double
      residual = 1.0;

    Image
      *convolve_image;

    convolve_image=ConvolveImage(relax_image,kernel_info,exception);
    if (convolve_image == (Image *) NULL)
      {
        status=MagickFalse;  /* propagate the failure instead of masking it */
        break;
      }
    relax_image=DestroyImage(relax_image);
    relax_image=convolve_image;
    status=CompositeOverImage(relax_image,mean_image,MagickTrue,0,0,exception);
    if (status == MagickFalse)
      break;
    status=SeamlessRMSEResidual(relax_image,residual_image,&residual,exception);
    if (status == MagickFalse)
      break;
    if ((verbose != MagickFalse) && ((i % MagickMax(tick,1)) == 0))
      (void) FormatLocaleFile(stderr," %g: %g\n",(double) i,(double) residual);
    if (residual < residual_threshold)
      {
        if (verbose != MagickFalse)
          (void) FormatLocaleFile(stderr," %g: %g\n",(double) i,(double)
            residual);
        break;
      }
    /*
      Snapshot this iteration so the next residual is measured against it.
    */
    residual_image=DestroyImage(residual_image);
    residual_image=CloneImage(relax_image,0,0,MagickTrue,exception);
    if (residual_image == (Image *) NULL)
      {
        status=MagickFalse;
        break;
      }
  }
  kernel_info=DestroyKernelInfo(kernel_info);
  mean_image=DestroyImage(mean_image);
  if (residual_image != (Image *) NULL)  /* NULL if the in-loop clone failed */
    residual_image=DestroyImage(residual_image);
  if (status == MagickFalse)
    {
      relax_image=DestroyImage(relax_image);
      return(MagickFalse);
    }
  /*
    Composite relaxed image over the background image.
  */
  foreground_image=SeamlessAddImage(source_image,relax_image,1.0,exception);
  relax_image=DestroyImage(relax_image);
  if (foreground_image == (Image *) NULL)
    return(MagickFalse);
  status=CompositeOverImage(image,foreground_image,MagickTrue,x_offset,y_offset,
    exception);
  foreground_image=DestroyImage(foreground_image);
  return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
compose_sync,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
compose_sync=MagickTrue;
value=GetImageArtifact(image,"compose:sync");
if (value != (const char *) NULL)
compose_sync=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
if ((source_image->alpha_trait == UndefinedPixelTrait) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p;
Quantum
*q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
ssize_t
i;
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(source_image,i);
PixelTrait source_traits = GetPixelChannelTraits(source_image,
channel);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((source_traits == UndefinedPixelTrait) ||
(traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p;
Quantum
*q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
double
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling dictated by an overlay gradient map:
X = red_channel; Y = green_channel; compose:args =
x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
Users input sigma now needs to be converted to the EWA ellipse size.
The filter defaults to a sigma of 0.5 so to make this match the users
input the ellipse size needs to be doubled.
*/
width=2.0*geometry_info.rho;
height=width;
if ((flags & HeightValue) != 0)
height=2.0*geometry_info.sigma;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
/*
Rotate vectors if a rotation angle is given.
*/
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
/*
Lets set a angle range and calculate in the loop.
*/
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a gaussian cylindrical filter for EWA Bluring.
As the minimum ellipse radius of support*1.0 the EWA algorithm
can only produce a minimum blur of 0.5 for Gaussian (support=2.0)
This means that even 'No Blur' will be still a little blurry! The
solution (as well as the problem of preventing any user expert filter
settings, is to set our own user settings, restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/*
Perform the variable blurring of each pixel in image.
*/
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs(angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided,
displacement/distortion map. -- Like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
status=InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
if (status == MagickFalse)
break;
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
if (x < (ssize_t) source_image->columns)
break;
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case SeamlessBlendCompositeOp:
{
double
residual_threshold = 0.0002,
iterations = 400.0;
size_t
tick = 100;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
iterations=geometry_info.rho;
if ((flags & SigmaValue) != 0)
residual_threshold=geometry_info.sigma;
if ((flags & XiValue) != 0)
tick=(size_t) geometry_info.xi;
}
status=SeamlessBlendImage(image,composite,x_offset,y_offset,iterations,
residual_threshold,tick,exception);
source_image=DestroyImage(source_image);
return(status);
}
case MathematicsCompositeOp:
{
/*
Just collect the values from "compose:args", setting.
Unused values are set to zero automagically.
Arguments are normally a comma separated list, so this probably should
be changed to some 'general comma list' parser, (with a minimum
number of values)
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
if (flags == NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidGeometry","`%s'",value);
}
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*(ssize_t) GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
DcaDa,
Sa,
SaSca,
Sc,
Sca;
ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case FreezeCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case InterpolateCompositeOp:
case LightenCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case MultiplyCompositeOp:
case NegateCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ReflectCompositeOp:
case ScreenCompositeOp:
case SoftBurnCompositeOp:
case SoftDodgeCompositeOp:
case SoftLightCompositeOp:
case StampCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
case ModulusAddCompositeOp:
{
if ((Sa+Da) <= 1.0)
{
alpha=(Sa+Da);
break;
}
alpha=((Sa+Da)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sa-Da) >= 0.0)
{
alpha=(Sa-Da);
break;
}
alpha=((Sa-Da)+1.0);
break;
}
default:
{
alpha=1.0;
break;
}
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits = GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((channel == AlphaPixelChannel) &&
((traits & UpdatePixelTrait) != 0))
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case BumpmapCompositeOp:
{
pixel=GetPixelIntensity(source_image,p)*Da;
break;
}
case ChangeMaskCompositeOp:
{
if (IsFuzzyEquivalencePixel(source_image,p,image,q) != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=QuantumRange*Da;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=GetPixelIntensity(source_image,p) <
GetPixelIntensity(image,q) ? Sa : Da;
break;
}
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*fabs((double) (Sa-Da));
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*(1.0-(1.0-Sa)*(1.0-Sa)*
PerceptibleReciprocal(Da));
if (pixel < 0.0)
pixel=0.0;
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sa)-0.25*
cos(MagickPI*Da));
break;
}
case LightenIntensityCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=GetPixelIntensity(source_image,p) >
GetPixelIntensity(image,q) ? Sa : Da;
break;
}
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case MultiplyCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=QuantumRange*Sa*Da;
break;
}
pixel=QuantumRange*alpha;
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*((1.0-Sa-Da));
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*(Sa*Sa*PerceptibleReciprocal(1.0-Da));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sa+Da*Da-1.0);
break;
}
case StereoCompositeOp:
{
pixel=QuantumRange*(Sa+Da)/2;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
if (source_traits == UndefinedPixelTrait)
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Dc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
SaSca=Sa*PerceptibleReciprocal(Sca);
DcaDa=Dca*PerceptibleReciprocal(Da);
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case BlurCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)*
SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) GetPixelBlack(source_image,p);
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
if (compose_sync == MagickFalse)
{
pixel=MagickMin(Sc,Dc);
break;
}
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=GetPixelIntensity(source_image,p) <
GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=fabs((double) Sc-Dc);
break;
}
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=QuantumRange*(Sc/PerceptibleReciprocal(Dc));
break;
}
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=QuantumRange*(Dc/PerceptibleReciprocal(Sc));
break;
}
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*gamma*(1.0-(1.0-Sca)*(1.0-Sca)*
PerceptibleReciprocal(Dca));
if (pixel < 0.0)
pixel=0.0;
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sca)-0.25*
cos(MagickPI*Dca));
break;
}
case LinearBurnCompositeOp:
{
/*
LinearBurn: as defined by Abode Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
LinearLight: as defined by Abode Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=MagickMax(Sc,Dc);
break;
}
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
if (compose_sync == MagickFalse)
{
pixel=GetPixelIntensity(source_image,p) >
GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
'Mathematics' a free form user control mathematical composition
is defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite
as a command separated 'geometry' string in "compose:args" image
artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
if (compose_sync == MagickFalse)
{
pixel=geometry_info.rho*Sc*Dc+geometry_info.sigma*Sc+
geometry_info.xi*Dc+geometry_info.psi;
break;
}
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=Dc-Sc;
break;
}
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
f(Sc,Dc) = Sc - Dc
*/
if (compose_sync == MagickFalse)
{
pixel=Sc-Dc;
break;
}
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=(Sc+Dc);
break;
}
if ((Sca+Dca) <= 1.0)
{
pixel=QuantumRange*(Sca+Dca);
break;
}
pixel=QuantumRange*((Sca+Dca)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=(Sc-Dc);
break;
}
if ((Sca-Dca) >= 0.0)
{
pixel=QuantumRange*(Sca-Dca);
break;
}
pixel=QuantumRange*((Sca-Dca)+1.0);
break;
}
case MultiplyCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=QuantumScale*Dc*Sc;
break;
}
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*(1.0-fabs(1.0-Sca-Dca));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*Sca;
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
if (compose_sync == MagickFalse)
{
pixel=(Dc+Sc);
break;
}
pixel=QuantumRange*(Sca+Dca);
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Sca*PerceptibleReciprocal(1.0-Dca));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case RMSECompositeOp:
{
double
gray;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
gray=sqrt(
(canvas_pixel.red-source_pixel.red)*
(canvas_pixel.red-source_pixel.red)+
(canvas_pixel.green-source_pixel.green)*
(canvas_pixel.green-source_pixel.green)+
(canvas_pixel.blue-source_pixel.blue)*
(canvas_pixel.blue-source_pixel.blue)/3.0);
switch (channel)
{
case RedPixelChannel: pixel=gray; break;
case GreenPixelChannel: pixel=gray; break;
case BluePixelChannel: pixel=gray; break;
default: pixel=Dc; break;
}
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
if (compose_sync == MagickFalse)
{
pixel=Sc+Dc-Sc*Dc;
break;
}
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftBurnCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Dca*PerceptibleReciprocal(1.0-Sca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Sca)*
PerceptibleReciprocal(Dca));
break;
}
case SoftDodgeCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Sca*PerceptibleReciprocal(1.0-Dca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Dca)*
PerceptibleReciprocal(Sca));
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa*
(4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)-
DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sca+Dca*Dca-1.0);
break;
}
case StereoCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)*
PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0*
(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image * ) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture_image: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Clone the texture so we can change its colorspace and virtual pixel
    method without mutating the caller's (const) image.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  /*
    TileVirtualPixelMethod makes out-of-bounds reads of the texture wrap
    around, so the tiling below never reads undefined pixels.
  */
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        General path: the compose operator (or alpha involvement) requires a
        full CompositeImage() call per tile position.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized): plain channel copy,
    one image row at a time, parallelized across rows.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *p,
      *pixels;

    ssize_t
      x;

    Quantum
      *q;

    size_t
      width;

    /* OpenMP loops cannot break, so a failed iteration just skips ahead. */
    if (status == MagickFalse)
      continue;
    /* Modulo wraps the texture vertically; tile_offset shifts the pattern. */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      /* The last tile in the row may be clipped at the image edge. */
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      for (j=0; j < (ssize_t) width; j++)
      {
        ssize_t
          i;

        /* Copy each channel the two images have in common. */
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
|
GB_unop__identity_int16_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_fp64)
// op(A') function: GB (_unop_tran__identity_int16_fp64)
// C type: int16_t
// A type: double
// cast: int16_t cij = GB_cast_to_int16_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with an fp64 -> int16
// typecast.  Works on either a full/dense A (Ab == NULL) or a bitmap A.
GrB_Info GB (_unop_apply__identity_int16_fp64)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    if (Ab == NULL)
    {
        // full/dense case: every entry is present
        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            // Cx [k] = identity (cast (Ax [k]))
            double aij = Ax [k] ;
            Cx [k] = GB_cast_to_int16_t ((double) (aij)) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                // entry present: typecast and store
                double aij = Ax [k] ;
                Cx [k] = GB_cast_to_int16_t ((double) (aij)) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int16_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-task workspaces; semantics defined by the included template
    const int64_t *restrict A_slice,    // partition of A across tasks (see GB_unop_transpose.c)
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel is a shared template; it is specialized for this
    // operator/type pair via the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros above.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
lu.pluto_ancc.old_rtile.c |
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
double L[N][N];
double U[N][N];
double A[N][N +13];
void init_arrays()
{
  int row, col, t;

  /* Clear both triangular factors before filling them in, so entries
   * outside the triangles are exactly zero. */
  for (row=0; row<N; row++)
    for (col=0; col<N; col++) {
      L[row][col] = 0.0;
      U[row][col] = 0.0;
    }

  /* Fill L (lower triangular) and U (upper triangular) with nonzero
   * values; this keeps the pivots of A = L*U away from zero and prevents
   * division by zero in the LU kernel. */
  for (row=0; row<N; row++)
    for (col=0; col<=row; col++) {
      L[row][col] = row+col+1;
      U[col][row] = row+col+1;
    }

  /* A = L*U, accumulated on top of A's initial contents. */
  for (row=0; row<N; row++)
    for (col=0; col<N; col++)
      for (t=0; t<N; t++)
        A[row][col] += L[row][t]*U[t][col];
}
/* Return the current wall-clock time in seconds (microsecond resolution).
 * The obsolete timezone argument of gettimeofday() is passed as NULL (its
 * result was previously requested but never used), and the unused local
 * `stat` has been removed. */
double rtclock()
{
  struct timeval tp;

  gettimeofday (&tp, NULL);
  return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
/* Benchmark driver: times REPS runs of a PLuTo/CLooG-generated, OpenMP-
 * parallel tiled LU decomposition of the N x N matrix A (initialized by
 * init_arrays()), then prints the mean time per run.  The /*@ ... @*/
 * annotations are read by the PolySyn/Orio tuning tools and must be kept
 * verbatim. */
int main()
{
  init_arrays();

  double annot_t_start=0, annot_t_end=0, annot_t_total=0;
  int annot_i;

  for (annot_i=0; annot_i<REPS; annot_i++)
  {
    annot_t_start = rtclock();

    register int i,j,k;
    register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t;
    register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6,
      newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12;
    register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6,
      newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12;

/*@ begin PolySyn(
  l1_tiles = [T1_1,T1_2,T1_3];
  l2_tiles = [T2_1,T2_2,T2_3];
  hotspot_permut = PERM_B;
  unroll_factors = [U1,U2,U3];
  parallelize = PAR;
  scalar_replace = SCREP;
  icc_vectorize = IVEC;
) @*/

    int c1, c2, c3, c4, c5, c6, c7, c8, c9;
    register int lb, ub, lb1, ub1, lb2, ub2;

/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 2.36s. */
    /* c1 iterates over time-skewed wavefronts; c2 is the parallel tile
     * dimension; c3..c6 select 32x32 tiles; c7..c9 are the point loops. */
    for (c1=-1;c1<=floord(3*N-5,128);c1++) {
      lb1=max(max(ceild(64*c1-N+2,64),ceild(32*c1-63,96)),0);
      ub1=min(floord(N-1,128),floord(64*c1+63,64));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9)
      for (c2=lb1; c2<=ub1; c2++) {
        for (c3=max(ceild(32*c1-32*c2-1953,2016),ceild(32*c1-32*c2-31,32));c3<=floord(N-1,64);c3++) {
          for (c4=max(max(2*c1-2*c2-64*c3-62,2*c1-2*c2),0);c4<=min(min(min(min(floord(64*c2+63,16),2*c1-2*c2+1),floord(992*c3+961,16)),floord(32*c3+31,16)),floord(N-2,32));c4++) {
            for (c5=max(max(ceild(16*c4-15,16),0),4*c2);c5<=min(4*c2+3,floord(N-1,32));c5++) {
              for (c6=max(max(max(max(ceild(16*c4-465,496),ceild(2*c1-2*c2-2*c3-c4-31,31)),ceild(-2*c1+2*c2+2*c3+c4-31,33)),2*c3),ceild(16*c4-15,16));c6<=min(2*c3+1,floord(N-1,32));c6++) {
                /* Diagonal tile: scale the pivot row and eliminate below
                 * the pivot within this tile. */
                if ((c1 == c2+c3) && (c4 == c6)) {
                  for (c7=max(0,32*c6);c7<=min(min(32*c6+30,N-2),32*c5+30);c7++) {
                    for (c8=max(32*c5,c7+1);c8<=min(N-1,32*c5+31);c8++) {
                      A[c7][c8]=A[c7][c8]/A[c7][c7] ;
                      for (c9=c7+1;c9<=min(N-1,32*c6+31);c9++) {
                        A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8] ;
                      }
                    }
                  }
                }

/*@ begin Loop(
  transform Composite(
    regtile = (['c7', 'c8', 'c9'],[8, 8, 1]),
    permut = [(['c7'],['c8'],['c9'])],
    scalarreplace = (False, 'double'))

  for (c7=max(32*c4,0);c7<=min(min(32*c6-1,32*c4+31),32*c5+30);c7++) {
    for (c8=max(c7+1,32*c5);c8<=min(32*c5+31,N-1);c8++)
      for (c9=32*c6;c9<=min(N-1,32*c6+31);c9++)
        {
          A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8] ;
        }
  }
) @*/{
                /* Update (trailing-submatrix) tile, register-tiled 8x8 in
                 * (c7,c8): eight fully-unrolled pivot rows per c7t step,
                 * each followed by a c8 cleanup loop for the remainder. */
                for (c7t=max(32*c4,0); c7t<=min(min(32*c6-1,32*c4+31),32*c5+30)-7; c7t=c7t+8) {
                  for (c8t=max(c7t+1,32*c5); c8t<=min(32*c5+31,N-1)-7; c8t=c8t+8)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8t]=A[c9][c8t]-A[c9][c7t]*A[c7t][c8t];
                      A[c9][(c8t+1)]=A[c9][(c8t+1)]-A[c9][c7t]*A[c7t][(c8t+1)];
                      A[c9][(c8t+2)]=A[c9][(c8t+2)]-A[c9][c7t]*A[c7t][(c8t+2)];
                      A[c9][(c8t+3)]=A[c9][(c8t+3)]-A[c9][c7t]*A[c7t][(c8t+3)];
                      A[c9][(c8t+4)]=A[c9][(c8t+4)]-A[c9][c7t]*A[c7t][(c8t+4)];
                      A[c9][(c8t+5)]=A[c9][(c8t+5)]-A[c9][c7t]*A[c7t][(c8t+5)];
                      A[c9][(c8t+6)]=A[c9][(c8t+6)]-A[c9][c7t]*A[c7t][(c8t+6)];
                      A[c9][(c8t+7)]=A[c9][(c8t+7)]-A[c9][c7t]*A[c7t][(c8t+7)];
                    }
                  for (c8=c8t; c8<=min(32*c5+31,N-1); c8=c8+1)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8]=A[c9][c8]-A[c9][c7t]*A[c7t][c8];
                    }
                  for (c8t=max((c7t+1)+1,32*c5); c8t<=min(32*c5+31,N-1)-7; c8t=c8t+8)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8t]=A[c9][c8t]-A[c9][(c7t+1)]*A[(c7t+1)][c8t];
                      A[c9][(c8t+1)]=A[c9][(c8t+1)]-A[c9][(c7t+1)]*A[(c7t+1)][(c8t+1)];
                      A[c9][(c8t+2)]=A[c9][(c8t+2)]-A[c9][(c7t+1)]*A[(c7t+1)][(c8t+2)];
                      A[c9][(c8t+3)]=A[c9][(c8t+3)]-A[c9][(c7t+1)]*A[(c7t+1)][(c8t+3)];
                      A[c9][(c8t+4)]=A[c9][(c8t+4)]-A[c9][(c7t+1)]*A[(c7t+1)][(c8t+4)];
                      A[c9][(c8t+5)]=A[c9][(c8t+5)]-A[c9][(c7t+1)]*A[(c7t+1)][(c8t+5)];
                      A[c9][(c8t+6)]=A[c9][(c8t+6)]-A[c9][(c7t+1)]*A[(c7t+1)][(c8t+6)];
                      A[c9][(c8t+7)]=A[c9][(c8t+7)]-A[c9][(c7t+1)]*A[(c7t+1)][(c8t+7)];
                    }
                  for (c8=c8t; c8<=min(32*c5+31,N-1); c8=c8+1)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8]=A[c9][c8]-A[c9][(c7t+1)]*A[(c7t+1)][c8];
                    }
                  for (c8t=max((c7t+2)+1,32*c5); c8t<=min(32*c5+31,N-1)-7; c8t=c8t+8)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8t]=A[c9][c8t]-A[c9][(c7t+2)]*A[(c7t+2)][c8t];
                      A[c9][(c8t+1)]=A[c9][(c8t+1)]-A[c9][(c7t+2)]*A[(c7t+2)][(c8t+1)];
                      A[c9][(c8t+2)]=A[c9][(c8t+2)]-A[c9][(c7t+2)]*A[(c7t+2)][(c8t+2)];
                      A[c9][(c8t+3)]=A[c9][(c8t+3)]-A[c9][(c7t+2)]*A[(c7t+2)][(c8t+3)];
                      A[c9][(c8t+4)]=A[c9][(c8t+4)]-A[c9][(c7t+2)]*A[(c7t+2)][(c8t+4)];
                      A[c9][(c8t+5)]=A[c9][(c8t+5)]-A[c9][(c7t+2)]*A[(c7t+2)][(c8t+5)];
                      A[c9][(c8t+6)]=A[c9][(c8t+6)]-A[c9][(c7t+2)]*A[(c7t+2)][(c8t+6)];
                      A[c9][(c8t+7)]=A[c9][(c8t+7)]-A[c9][(c7t+2)]*A[(c7t+2)][(c8t+7)];
                    }
                  for (c8=c8t; c8<=min(32*c5+31,N-1); c8=c8+1)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8]=A[c9][c8]-A[c9][(c7t+2)]*A[(c7t+2)][c8];
                    }
                  for (c8t=max((c7t+3)+1,32*c5); c8t<=min(32*c5+31,N-1)-7; c8t=c8t+8)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8t]=A[c9][c8t]-A[c9][(c7t+3)]*A[(c7t+3)][c8t];
                      A[c9][(c8t+1)]=A[c9][(c8t+1)]-A[c9][(c7t+3)]*A[(c7t+3)][(c8t+1)];
                      A[c9][(c8t+2)]=A[c9][(c8t+2)]-A[c9][(c7t+3)]*A[(c7t+3)][(c8t+2)];
                      A[c9][(c8t+3)]=A[c9][(c8t+3)]-A[c9][(c7t+3)]*A[(c7t+3)][(c8t+3)];
                      A[c9][(c8t+4)]=A[c9][(c8t+4)]-A[c9][(c7t+3)]*A[(c7t+3)][(c8t+4)];
                      A[c9][(c8t+5)]=A[c9][(c8t+5)]-A[c9][(c7t+3)]*A[(c7t+3)][(c8t+5)];
                      A[c9][(c8t+6)]=A[c9][(c8t+6)]-A[c9][(c7t+3)]*A[(c7t+3)][(c8t+6)];
                      A[c9][(c8t+7)]=A[c9][(c8t+7)]-A[c9][(c7t+3)]*A[(c7t+3)][(c8t+7)];
                    }
                  for (c8=c8t; c8<=min(32*c5+31,N-1); c8=c8+1)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8]=A[c9][c8]-A[c9][(c7t+3)]*A[(c7t+3)][c8];
                    }
                  for (c8t=max((c7t+4)+1,32*c5); c8t<=min(32*c5+31,N-1)-7; c8t=c8t+8)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8t]=A[c9][c8t]-A[c9][(c7t+4)]*A[(c7t+4)][c8t];
                      A[c9][(c8t+1)]=A[c9][(c8t+1)]-A[c9][(c7t+4)]*A[(c7t+4)][(c8t+1)];
                      A[c9][(c8t+2)]=A[c9][(c8t+2)]-A[c9][(c7t+4)]*A[(c7t+4)][(c8t+2)];
                      A[c9][(c8t+3)]=A[c9][(c8t+3)]-A[c9][(c7t+4)]*A[(c7t+4)][(c8t+3)];
                      A[c9][(c8t+4)]=A[c9][(c8t+4)]-A[c9][(c7t+4)]*A[(c7t+4)][(c8t+4)];
                      A[c9][(c8t+5)]=A[c9][(c8t+5)]-A[c9][(c7t+4)]*A[(c7t+4)][(c8t+5)];
                      A[c9][(c8t+6)]=A[c9][(c8t+6)]-A[c9][(c7t+4)]*A[(c7t+4)][(c8t+6)];
                      A[c9][(c8t+7)]=A[c9][(c8t+7)]-A[c9][(c7t+4)]*A[(c7t+4)][(c8t+7)];
                    }
                  for (c8=c8t; c8<=min(32*c5+31,N-1); c8=c8+1)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8]=A[c9][c8]-A[c9][(c7t+4)]*A[(c7t+4)][c8];
                    }
                  for (c8t=max((c7t+5)+1,32*c5); c8t<=min(32*c5+31,N-1)-7; c8t=c8t+8)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8t]=A[c9][c8t]-A[c9][(c7t+5)]*A[(c7t+5)][c8t];
                      A[c9][(c8t+1)]=A[c9][(c8t+1)]-A[c9][(c7t+5)]*A[(c7t+5)][(c8t+1)];
                      A[c9][(c8t+2)]=A[c9][(c8t+2)]-A[c9][(c7t+5)]*A[(c7t+5)][(c8t+2)];
                      A[c9][(c8t+3)]=A[c9][(c8t+3)]-A[c9][(c7t+5)]*A[(c7t+5)][(c8t+3)];
                      A[c9][(c8t+4)]=A[c9][(c8t+4)]-A[c9][(c7t+5)]*A[(c7t+5)][(c8t+4)];
                      A[c9][(c8t+5)]=A[c9][(c8t+5)]-A[c9][(c7t+5)]*A[(c7t+5)][(c8t+5)];
                      A[c9][(c8t+6)]=A[c9][(c8t+6)]-A[c9][(c7t+5)]*A[(c7t+5)][(c8t+6)];
                      A[c9][(c8t+7)]=A[c9][(c8t+7)]-A[c9][(c7t+5)]*A[(c7t+5)][(c8t+7)];
                    }
                  for (c8=c8t; c8<=min(32*c5+31,N-1); c8=c8+1)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8]=A[c9][c8]-A[c9][(c7t+5)]*A[(c7t+5)][c8];
                    }
                  for (c8t=max((c7t+6)+1,32*c5); c8t<=min(32*c5+31,N-1)-7; c8t=c8t+8)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8t]=A[c9][c8t]-A[c9][(c7t+6)]*A[(c7t+6)][c8t];
                      A[c9][(c8t+1)]=A[c9][(c8t+1)]-A[c9][(c7t+6)]*A[(c7t+6)][(c8t+1)];
                      A[c9][(c8t+2)]=A[c9][(c8t+2)]-A[c9][(c7t+6)]*A[(c7t+6)][(c8t+2)];
                      A[c9][(c8t+3)]=A[c9][(c8t+3)]-A[c9][(c7t+6)]*A[(c7t+6)][(c8t+3)];
                      A[c9][(c8t+4)]=A[c9][(c8t+4)]-A[c9][(c7t+6)]*A[(c7t+6)][(c8t+4)];
                      A[c9][(c8t+5)]=A[c9][(c8t+5)]-A[c9][(c7t+6)]*A[(c7t+6)][(c8t+5)];
                      A[c9][(c8t+6)]=A[c9][(c8t+6)]-A[c9][(c7t+6)]*A[(c7t+6)][(c8t+6)];
                      A[c9][(c8t+7)]=A[c9][(c8t+7)]-A[c9][(c7t+6)]*A[(c7t+6)][(c8t+7)];
                    }
                  for (c8=c8t; c8<=min(32*c5+31,N-1); c8=c8+1)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8]=A[c9][c8]-A[c9][(c7t+6)]*A[(c7t+6)][c8];
                    }
                  for (c8t=max((c7t+7)+1,32*c5); c8t<=min(32*c5+31,N-1)-7; c8t=c8t+8)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8t]=A[c9][c8t]-A[c9][(c7t+7)]*A[(c7t+7)][c8t];
                      A[c9][(c8t+1)]=A[c9][(c8t+1)]-A[c9][(c7t+7)]*A[(c7t+7)][(c8t+1)];
                      A[c9][(c8t+2)]=A[c9][(c8t+2)]-A[c9][(c7t+7)]*A[(c7t+7)][(c8t+2)];
                      A[c9][(c8t+3)]=A[c9][(c8t+3)]-A[c9][(c7t+7)]*A[(c7t+7)][(c8t+3)];
                      A[c9][(c8t+4)]=A[c9][(c8t+4)]-A[c9][(c7t+7)]*A[(c7t+7)][(c8t+4)];
                      A[c9][(c8t+5)]=A[c9][(c8t+5)]-A[c9][(c7t+7)]*A[(c7t+7)][(c8t+5)];
                      A[c9][(c8t+6)]=A[c9][(c8t+6)]-A[c9][(c7t+7)]*A[(c7t+7)][(c8t+6)];
                      A[c9][(c8t+7)]=A[c9][(c8t+7)]-A[c9][(c7t+7)]*A[(c7t+7)][(c8t+7)];
                    }
                  for (c8=c8t; c8<=min(32*c5+31,N-1); c8=c8+1)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8]=A[c9][c8]-A[c9][(c7t+7)]*A[(c7t+7)][c8];
                    }
                }
                /* c7 cleanup: leftover pivot rows (fewer than 8 remain). */
                for (c7=c7t; c7<=min(min(32*c6-1,32*c4+31),32*c5+30); c7=c7+1) {
                  for (c8t=max(c7+1,32*c5); c8t<=min(32*c5+31,N-1)-7; c8t=c8t+8)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8t]=A[c9][c8t]-A[c9][c7]*A[c7][c8t];
                      A[c9][(c8t+1)]=A[c9][(c8t+1)]-A[c9][c7]*A[c7][(c8t+1)];
                      A[c9][(c8t+2)]=A[c9][(c8t+2)]-A[c9][c7]*A[c7][(c8t+2)];
                      A[c9][(c8t+3)]=A[c9][(c8t+3)]-A[c9][c7]*A[c7][(c8t+3)];
                      A[c9][(c8t+4)]=A[c9][(c8t+4)]-A[c9][c7]*A[c7][(c8t+4)];
                      A[c9][(c8t+5)]=A[c9][(c8t+5)]-A[c9][c7]*A[c7][(c8t+5)];
                      A[c9][(c8t+6)]=A[c9][(c8t+6)]-A[c9][c7]*A[c7][(c8t+6)];
                      A[c9][(c8t+7)]=A[c9][(c8t+7)]-A[c9][c7]*A[c7][(c8t+7)];
                    }
                  for (c8=c8t; c8<=min(32*c5+31,N-1); c8=c8+1)
                    for (c9=32*c6; c9<=min(N-1,32*c6+31); c9++ ) {
                      A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8];
                    }
                }
}
/*@ end @*/

                /* Scale the last row of the pivot block by its pivot. */
                if ((c1 == c2+c3) && (-c4 == -c6) && (c4 <= min(floord(N-33,32),floord(32*c5-1,32)))) {
                  for (c8=max(32*c5,32*c4+32);c8<=min(N-1,32*c5+31);c8++) {
                    A[32*c4+31][c8]=A[32*c4+31][c8]/A[32*c4+31][32*c4+31] ;
                  }
                }
              }
            }
          }
        }
      }
    }
/* End of CLooG code */
/*@ end @*/

    annot_t_end = rtclock();
    annot_t_total += annot_t_end - annot_t_start;
  }

  /* Report the average kernel time over REPS repetitions. */
  annot_t_total = annot_t_total / REPS;
  printf("%f\n", annot_t_total);

  /* Return a data-dependent value so the compiler cannot dead-code the
   * computation away. */
  return ((int) A[0][0]);
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * Note: *y is used as scratch space — carries are normalized into it
 * before the subtraction, so it may come back modified.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so the microsecond subtraction cannot go negative. */
    if (x->tv_usec < y->tv_usec) {
        int carry_sec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry_sec;
        y->tv_sec += carry_sec;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int borrow_sec = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * borrow_sec;
        y->tv_sec -= borrow_sec;
    }

    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff x's whole-second part ended up smaller. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-4 (25-point) 3D stencil benchmark.
 *
 * Usage: prog Nx Ny Nz [Nt]
 * Each grid dimension is padded by 8 (4 ghost cells per side for the
 * order-4 stencil). The sweep is timed TESTS times; the minimum time is
 * reported through PRINT_RESULTS.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* BUGFIX: defaults keep the run well-defined when arguments are
     * missing — previously Nx..Nt were read uninitialized. */
    int Nx = 16, Ny = 16, Nz = 16, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Two time planes of A (indices t%2 and (t+1)%2) plus coefficients.
     * BUGFIX: roc2 was previously malloc'd twice — the first, undersized
     * allocation leaked; one correctly-sized allocation suffices. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 32;
    tile_size[3] = 1024;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    /* BUGFIX: initialize the WHOLE grid, including plane/row/column 0 and
     * the second time plane A[1] — previously the loops started at 1 and
     * A[1] was never written, so the first sweep read uninitialized
     * memory (the stencil reads A[(t+1)%2] and indices down to i-4 == 0). */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    (void) num_threads; /* currently unused by the serial reference run */

    /* Order-4 finite-difference coefficients */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        (void) ts_return;
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);         /* BUGFIX: the plane-pointer array itself was leaked */
    free(tile_size); /* BUGFIX: was leaked */
    return 0;
}
|
learner.c | /* =============================================================================
*
* learn.c
* -- Learns structure of Bayesian net from data
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* The penalized log-likelihood score (Friedman & Yahkani, 1996) is used to
 * evaluate the "goodness" of a Bayesian net:
*
* M n_j
* --- --- ---
* -N_params * ln(R) / 2 + R > > > P((a_j = v), X_j) ln P(a_j = v | X_j)
* --- --- ---
* j=1 X_j v=1
*
* Where:
*
* N_params total number of parents across all variables
* R number of records
* M number of variables
* X_j parents of the jth variable
* n_j number of attributes of the jth variable
* a_j attribute
*
* The second summation of X_j varies across all possible assignments to the
* values of the parents X_j.
*
* In the code:
*
* "local log likelihood" is P((a_j = v), X_j) ln P(a_j = v | X_j)
* "log likelihood" is everything to the right of the '+', i.e., "R ... X_j)"
* "base penalty" is -ln(R) / 2
* "penalty" is N_params * -ln(R) / 2
* "score" is the entire expression
*
* For more notes, refer to:
*
* A. Moore and M.-S. Lee. Cached sufficient statistics for efficient machine
* learning with large datasets. Journal of Artificial Intelligence Research 8
* (1998), pp 67-91.
*
* =============================================================================
*
* The search strategy uses a combination of local and global structure search.
* Similar to the technique described in:
*
* D. M. Chickering, D. Heckerman, and C. Meek. A Bayesian approach to learning
* Bayesian networks with local structure. In Proceedings of Thirteenth
* Conference on Uncertainty in Artificial Intelligence (1997), pp. 80-89.
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include "adtree.h"
#include "data.h"
#include "learner.h"
#include "list.h"
#include "net.h"
#include "operation.h"
#include "query.h"
#include "random.h"
#include "thread.h"
#include "timer.h"
#include "utility.h"
#include "vector.h"
/* One candidate change to the network structure: apply `op` to the edge
 * fromId -> toId.  `score` is the penalized log-likelihood used to order
 * tasks (higher is better; see compareTask). */
struct learner_task {
    operation_t op;
    long fromId;   /* source variable of the edge */
    long toId;     /* destination variable of the edge */
    float score;
};
/* Bundled arguments for the TMfindBest*Task routines. */
typedef struct findBestTaskArg {
    long toId;                      /* variable whose parent set is being revised */
    learner_t* learnerPtr;
    query_t* queries;               /* one reusable query per variable (indexed by id) */
    vector_t* queryVectorPtr;       /* scratch: joint query under evaluation */
    vector_t* parentQueryVectorPtr; /* scratch: parent query under evaluation */
    long numTotalParent;            /* current parent count across all variables */
    float basePenalty;              /* -ln(R)/2 term (see file header) */
    float baseLogLikelihood;
    bitmap_t* bitmapPtr;            /* scratch: invalid/visited variable marks */
    queue_t* workQueuePtr;          /* scratch: work queue for descendant search */
    vector_t* aQueryVectorPtr;      /* extra scratch vector */
    vector_t* bQueryVectorPtr;      /* extra scratch vector */
} findBestTaskArg_t;
#ifdef TEST_LEARNER
/* Stand-alone unit-test defaults for the learner's tuning knobs. */
long global_maxNumEdgeLearned = -1L; /* < 0 means no per-node edge limit (see TMfindBestInsertTask) */
long global_insertPenalty = 1;
float global_operationQualityFactor = 1.0F;
#else
/* Normally these knobs are defined by the enclosing application. */
extern long global_insertPenalty;
extern long global_maxNumEdgeLearned;
extern float global_operationQualityFactor;
#endif
/* =============================================================================
* DECLARATION OF TM_CALLABLE FUNCTIONS
* =============================================================================
*/
TM_CALLABLE
static learner_task_t
TMfindBestReverseTask (TM_ARGDECL findBestTaskArg_t* argPtr);
TM_CALLABLE
static learner_task_t
TMfindBestInsertTask (TM_ARGDECL findBestTaskArg_t* argPtr);
TM_CALLABLE
static learner_task_t
TMfindBestRemoveTask (TM_ARGDECL findBestTaskArg_t* argPtr);
/* =============================================================================
* compareTask
* -- Want greatest score first
* -- For list
* =============================================================================
*/
static long
compareTask (const void* aPtr, const void* bPtr)
{
    /* Descending by score; ties broken ascending by destination id. */
    const learner_task_t* taskA = (const learner_task_t*)aPtr;
    const learner_task_t* taskB = (const learner_task_t*)bPtr;

    if (taskA->score < taskB->score) {
        return 1;
    }
    if (taskB->score < taskA->score) {
        return -1;
    }
    return (taskA->toId - taskB->toId);
}
static long
TMcompareTask (TM_ARGDECL const void* aPtr, const void* bPtr)
{
    /* Transactional twin of compareTask: descending score, then
     * ascending destination id as tie-breaker. */
    const learner_task_t* lhs = (const learner_task_t*)aPtr;
    const learner_task_t* rhs = (const learner_task_t*)bPtr;

    if (lhs->score < rhs->score) {
        return 1;
    }
    if (rhs->score < lhs->score) {
        return -1;
    }
    return (lhs->toId - rhs->toId);
}
/* Comparator pair (sequential / transactional) used by the shared task list. */
comparator_t learner_comparetask(&compareTask, &TMcompareTask);
/* =============================================================================
* compareQuery
* -- Want smallest ID first
* -- For vector_sort
* =============================================================================
*/
static int
compareQuery (const void* aPtr, const void* bPtr)
{
    /* Each vector element is a (void*) slot holding a query_t*;
     * order ascending by variable index. */
    const query_t* lhs = (const query_t*)(*(void**)aPtr);
    const query_t* rhs = (const query_t*)(*(void**)bPtr);
    return (int)(lhs->index - rhs->index);
}
/* =============================================================================
* learner_alloc
* =============================================================================
*/
learner_t*
learner_alloc (data_t* dataPtr, adtree_t* adtreePtr, long numThread)
{
    /* Allocates a learner over a pre-built AD-tree.  `numThread` is part
     * of the interface but unused in this routine.  Returns NULL if the
     * top-level allocation fails; inner allocation failures are caught by
     * assert (no graceful recovery is attempted). */
    learner_t* learnerPtr;

    learnerPtr = (learner_t*)SEQ_MALLOC(sizeof(learner_t));
    if (learnerPtr) {
        learnerPtr->adtreePtr = adtreePtr;
        /* One network node per dataset variable */
        learnerPtr->netPtr = net_alloc(dataPtr->numVar);
        assert(learnerPtr->netPtr);
        /* Per-variable log likelihood with no parents */
        learnerPtr->localBaseLogLikelihoods =
            (float*)SEQ_MALLOC(dataPtr->numVar * sizeof(float));
        assert(learnerPtr->localBaseLogLikelihoods);
        learnerPtr->baseLogLikelihood = 0.0F;
        /* One reusable task slot per variable */
        learnerPtr->tasks =
            (learner_task_t*)SEQ_MALLOC(dataPtr->numVar * sizeof(learner_task_t));
        assert(learnerPtr->tasks);
        /* Task list ordered by learner_comparetask (greatest score first) */
        learnerPtr->taskListPtr = Plist_alloc(&learner_comparetask);
        assert(learnerPtr->taskListPtr);
        learnerPtr->numTotalParent = 0;
    }

    return learnerPtr;
}
/* =============================================================================
* learner_free
* =============================================================================
*/
void
learner_free (learner_t* learnerPtr)
{
    /* Releases everything learner_alloc created: members first, then the
     * learner struct itself. */
    list_free(learnerPtr->taskListPtr);
    SEQ_FREE(learnerPtr->tasks);
    SEQ_FREE(learnerPtr->localBaseLogLikelihoods);
    net_free(learnerPtr->netPtr);
    SEQ_FREE(learnerPtr);
}
/* =============================================================================
* computeSpecificLocalLogLikelihood
* -- Query vectors should not contain wildcards
* =============================================================================
*/
static float
computeSpecificLocalLogLikelihood (adtree_t* adtreePtr,
                                   vector_t* queryVectorPtr,
                                   vector_t* parentQueryVectorPtr)
{
    /* One term of the local log likelihood:
     * P((a_j = v), X_j) * ln P(a_j = v | X_j).
     * Query vectors must contain no wildcards. Zero joint count
     * contributes nothing (and avoids log(0)). */
    long jointCount = adtree_getCount(adtreePtr, queryVectorPtr);
    if (jointCount == 0) {
        return 0.0;
    }

    long parentCount = adtree_getCount(adtreePtr, parentQueryVectorPtr);
    assert(parentCount >= jointCount);
    assert(parentCount > 0);

    double jointProbability = (double)jointCount / (double)adtreePtr->numRecord;
    double conditional = (double)jointCount / (double)parentCount;
    return (float)(jointProbability * (double)log(conditional));
}
/* =============================================================================
* createPartition
* =============================================================================
*/
static void
createPartition (long min, long max, long id, long n,
                 long* startPtr, long* stopPtr)
{
    /* Divide [min,max) into n near-equal chunks and return chunk `id` as
     * [*startPtr,*stopPtr); the final chunk absorbs any remainder. */
    long span = max - min;
    long chunkSize = MAX(1, ((span + n/2) / n)); /* rounded division */
    long begin = min + chunkSize * id;
    long end = (id == (n-1)) ? max : MIN(max, (begin + chunkSize));
    *startPtr = begin;
    *stopPtr = end;
}
/* =============================================================================
* createTaskList
* -- baseLogLikelihoods and taskListPtr are updated
* =============================================================================
*/
static void
createTaskList (void* argPtr)
{
TM_THREAD_ENTER();
long myId = thread_getId();
//if(myId==0) getchar();
//thread_barrier_wait();
long numThread = thread_getNumThread();
learner_t* learnerPtr = (learner_t*)argPtr;
list_t* taskListPtr = learnerPtr->taskListPtr;
bool_t status;
adtree_t* adtreePtr = learnerPtr->adtreePtr;
float* localBaseLogLikelihoods = learnerPtr->localBaseLogLikelihoods;
learner_task_t* tasks = learnerPtr->tasks;
query_t queries[2];
vector_t* queryVectorPtr = PVECTOR_ALLOC(2);
assert(queryVectorPtr);
status = vector_pushBack(queryVectorPtr, (void*)&queries[0]);
assert(status);
query_t parentQuery;
vector_t* parentQueryVectorPtr = PVECTOR_ALLOC(1);
assert(parentQueryVectorPtr);
long numVar = adtreePtr->numVar;
long numRecord = adtreePtr->numRecord;
float baseLogLikelihood = 0.0;
float penalty = (float)(-0.5 * log((double)numRecord)); /* only add 1 edge */
long v;
long v_start;
long v_stop;
createPartition(0, numVar, myId, numThread, &v_start, &v_stop);
/*
* Compute base log likelihood for each variable and total base loglikelihood
*/
for (v = v_start; v < v_stop; v++) {
float localBaseLogLikelihood = 0.0;
queries[0].index = v;
queries[0].value = 0;
localBaseLogLikelihood +=
computeSpecificLocalLogLikelihood(adtreePtr,
queryVectorPtr,
parentQueryVectorPtr);
queries[0].value = 1;
localBaseLogLikelihood +=
computeSpecificLocalLogLikelihood(adtreePtr,
queryVectorPtr,
parentQueryVectorPtr);
localBaseLogLikelihoods[v] = localBaseLogLikelihood;
baseLogLikelihood += localBaseLogLikelihood;
} /* foreach variable */
TM_BEGIN();
float globalBaseLogLikelihood =
TM_SHARED_READ_F(learnerPtr->baseLogLikelihood);
TM_SHARED_WRITE_F(learnerPtr->baseLogLikelihood,
(baseLogLikelihood + globalBaseLogLikelihood));
TM_END();
/*
* For each variable, find if the addition of any edge _to_ it is better
*/
status = PVECTOR_PUSHBACK(parentQueryVectorPtr, (void*)&parentQuery);
assert(status);
for (v = v_start; v < v_stop; v++) {
/*
* Compute base log likelihood for this variable
*/
queries[0].index = v;
long bestLocalIndex = v;
float bestLocalLogLikelihood = localBaseLogLikelihoods[v];
status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[1]);
assert(status);
long vv;
for (vv = 0; vv < numVar; vv++) {
if (vv == v) {
continue;
}
parentQuery.index = vv;
if (v < vv) {
queries[0].index = v;
queries[1].index = vv;
} else {
queries[0].index = vv;
queries[1].index = v;
}
float newLocalLogLikelihood = 0.0;
queries[0].value = 0;
queries[1].value = 0;
parentQuery.value = 0;
newLocalLogLikelihood +=
computeSpecificLocalLogLikelihood(adtreePtr,
queryVectorPtr,
parentQueryVectorPtr);
queries[0].value = 0;
queries[1].value = 1;
parentQuery.value = ((vv < v) ? 0 : 1);
newLocalLogLikelihood +=
computeSpecificLocalLogLikelihood(adtreePtr,
queryVectorPtr,
parentQueryVectorPtr);
queries[0].value = 1;
queries[1].value = 0;
parentQuery.value = ((vv < v) ? 1 : 0);
newLocalLogLikelihood +=
computeSpecificLocalLogLikelihood(adtreePtr,
queryVectorPtr,
parentQueryVectorPtr);
queries[0].value = 1;
queries[1].value = 1;
parentQuery.value = 1;
newLocalLogLikelihood +=
computeSpecificLocalLogLikelihood(adtreePtr,
queryVectorPtr,
parentQueryVectorPtr);
if (newLocalLogLikelihood > bestLocalLogLikelihood) {
bestLocalIndex = vv;
bestLocalLogLikelihood = newLocalLogLikelihood;
}
} /* foreach other variable */
PVECTOR_POPBACK(queryVectorPtr);
if (bestLocalIndex != v) {
float logLikelihood = numRecord * (baseLogLikelihood +
+ bestLocalLogLikelihood
- localBaseLogLikelihoods[v]);
float score = penalty + logLikelihood;
learner_task_t* taskPtr = &tasks[v];
taskPtr->op = OPERATION_INSERT;
taskPtr->fromId = bestLocalIndex;
taskPtr->toId = v;
taskPtr->score = score;
TM_BEGIN();
status = TMLIST_INSERT(taskListPtr, (void*)taskPtr);
TM_END();
assert(status);
}
} /* for each variable */
PVECTOR_FREE(queryVectorPtr);
PVECTOR_FREE(parentQueryVectorPtr);
#ifdef TEST_LEARNER
list_iter_t it;
list_iter_reset(&it, taskListPtr);
while (list_iter_hasNext(&it, taskListPtr)) {
learner_task_t* taskPtr = (learner_task_t*)list_iter_next(&it, taskListPtr);
taskPtr->op, taskPtr->fromId, taskPtr->toId, taskPtr->score);
}
#endif /* TEST_LEARNER */
TM_THREAD_EXIT();
}
/* =============================================================================
* TMpopTask
 * -- Returns NULL if the list is empty
* =============================================================================
*/
learner_task_t*
TMpopTask (TM_ARGDECL list_t* taskListPtr)
{
    /* Transactionally removes and returns the head task (greatest score,
     * per learner_comparetask), or NULL when the list is empty. */
    learner_task_t* taskPtr = NULL;

    list_iter_t it;
    TMLIST_ITER_RESET(&it, taskListPtr);
    if (TMLIST_ITER_HASNEXT(&it, taskListPtr)) {
        taskPtr = (learner_task_t*)TMLIST_ITER_NEXT(&it, taskListPtr);
        /* Remove within the same transaction so no other thread can
         * claim the same task */
        bool_t status = TMLIST_REMOVE(taskListPtr, (void*)taskPtr);
        assert(status);
    }

    return taskPtr;
}
/* =============================================================================
* populateParentQuery
* -- Modifies contents of parentQueryVectorPtr
* =============================================================================
*/
static void
populateParentQueryVector (net_t* netPtr,
                           long id,
                           query_t* queries,
                           vector_t* parentQueryVectorPtr)
{
    /* Rebuild parentQueryVectorPtr with one query per parent of `id`,
     * in the order the network's parent list yields them. */
    vector_clear(parentQueryVectorPtr);

    list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, id);
    list_iter_t iter;
    list_iter_reset(&iter, parentIdListPtr);
    while (list_iter_hasNext(&iter, parentIdListPtr)) {
        long parentId = (long)list_iter_next(&iter, parentIdListPtr);
        bool_t ok = vector_pushBack(parentQueryVectorPtr,
                                    (void*)&queries[parentId]);
        assert(ok);
    }
}
/* =============================================================================
* TMpopulateParentQuery
* -- Modifies contents of parentQueryVectorPtr
* =============================================================================
*/
static void
TMpopulateParentQueryVector (TM_ARGDECL
                             net_t* netPtr,
                             long id,
                             query_t* queries,
                             vector_t* parentQueryVectorPtr)
{
    /* Transactional twin of populateParentQueryVector: rebuilds
     * parentQueryVectorPtr with one query per parent of `id`, reading
     * the parent list through TM iterators.
     * NOTE(review): the clear uses plain vector_clear while the pushes
     * use the PVECTOR_ macro — presumably both name the same operation;
     * confirm against vector.h. */
    vector_clear(parentQueryVectorPtr);

    list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, id);

    list_iter_t it;
    TMLIST_ITER_RESET(&it, parentIdListPtr);
    while (TMLIST_ITER_HASNEXT(&it, parentIdListPtr)) {
        long parentId = (long)TMLIST_ITER_NEXT(&it, parentIdListPtr);
        bool_t status = PVECTOR_PUSHBACK(parentQueryVectorPtr,
                                         (void*)&queries[parentId]);
        assert(status);
    }
}
/* =============================================================================
* populateQueryVectors
* -- Modifies contents of queryVectorPtr and parentQueryVectorPtr
* =============================================================================
*/
static void
populateQueryVectors (net_t* netPtr,
                      long id,
                      query_t* queries,
                      vector_t* queryVectorPtr,
                      vector_t* parentQueryVectorPtr)
{
    /* Parent queries first... */
    populateParentQueryVector(netPtr, id, queries, parentQueryVectorPtr);

    /* ...then the joint query = parents + the variable itself, kept
     * sorted by variable index (compareQuery). */
    bool_t ok = vector_copy(queryVectorPtr, parentQueryVectorPtr);
    assert(ok);
    ok = vector_pushBack(queryVectorPtr, (void*)&queries[id]);
    assert(ok);
    vector_sort(queryVectorPtr, &compareQuery);
}
/* =============================================================================
* TMpopulateQueryVectors
* -- Modifies contents of queryVectorPtr and parentQueryVectorPtr
* =============================================================================
*/
static void
TMpopulateQueryVectors (TM_ARGDECL
                        net_t* netPtr,
                        long id,
                        query_t* queries,
                        vector_t* queryVectorPtr,
                        vector_t* parentQueryVectorPtr)
{
    /* Transactional twin of populateQueryVectors: fills
     * parentQueryVectorPtr with id's parents and queryVectorPtr with
     * parents + id, sorted by variable index (compareQuery). */
    TMpopulateParentQueryVector(TM_ARG netPtr, id, queries, parentQueryVectorPtr);

    bool_t status;
    status = PVECTOR_COPY(queryVectorPtr, parentQueryVectorPtr);
    assert(status);
    status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[id]);
    assert(status);
    PVECTOR_SORT(queryVectorPtr, &compareQuery);
}
/* =============================================================================
* computeLocalLogLikelihoodHelper
* -- Recursive helper routine
* =============================================================================
*/
static float
computeLocalLogLikelihoodHelper (long i,
                                 long numParent,
                                 adtree_t* adtreePtr,
                                 query_t* queries,
                                 vector_t* queryVectorPtr,
                                 vector_t* parentQueryVectorPtr)
{
    /* Base case: every parent from position i onward has been assigned a
     * concrete value, so one specific term can be evaluated. */
    if (i >= numParent) {
        return computeSpecificLocalLogLikelihood(adtreePtr,
                                                 queryVectorPtr,
                                                 parentQueryVectorPtr);
    }

    /* Recursive case: sum over both values (0 and 1) of parent i,
     * restoring the wildcard before returning so `queries` is unchanged. */
    query_t* parentQueryPtr = (query_t*)vector_at(parentQueryVectorPtr, i);
    long parentIndex = parentQueryPtr->index;

    float localLogLikelihood = 0.0;
    long value;
    for (value = 0; value <= 1; value++) {
        queries[parentIndex].value = value;
        localLogLikelihood += computeLocalLogLikelihoodHelper((i + 1),
                                                              numParent,
                                                              adtreePtr,
                                                              queries,
                                                              queryVectorPtr,
                                                              parentQueryVectorPtr);
    }
    queries[parentIndex].value = QUERY_VALUE_WILDCARD;

    return localLogLikelihood;
}
/* =============================================================================
* computeLocalLogLikelihood
* -- Populate the query vectors before passing as args
* =============================================================================
*/
static float
computeLocalLogLikelihood (long id,
                           adtree_t* adtreePtr,
                           net_t* netPtr,
                           query_t* queries,
                           vector_t* queryVectorPtr,
                           vector_t* parentQueryVectorPtr)
{
    /* Sum the contributions for both values of variable `id`, recursing
     * over all parent assignments, then restore the wildcard so
     * `queries` is unchanged on exit. (netPtr is unused here; it is kept
     * for interface symmetry with callers.) */
    long numParent = vector_getSize(parentQueryVectorPtr);
    float localLogLikelihood = 0.0;

    long value;
    for (value = 0; value <= 1; value++) {
        queries[id].value = value;
        localLogLikelihood += computeLocalLogLikelihoodHelper(0,
                                                              numParent,
                                                              adtreePtr,
                                                              queries,
                                                              queryVectorPtr,
                                                              parentQueryVectorPtr);
    }
    queries[id].value = QUERY_VALUE_WILDCARD;

    return localLogLikelihood;
}
/* =============================================================================
* TMfindBestInsertTask
* =============================================================================
*/
/*
 * Finds the single best new parent edge (fromId -> toId) for
 * argPtr->toId. Returns a task whose fromId == toId when no insertion
 * improves the local log likelihood; otherwise the task carries the
 * scored insert.
 */
static learner_task_t
TMfindBestInsertTask (TM_ARGDECL findBestTaskArg_t* argPtr)
{
    long toId = argPtr->toId;
    learner_t* learnerPtr = argPtr->learnerPtr;
    query_t* queries = argPtr->queries;
    vector_t* queryVectorPtr = argPtr->queryVectorPtr;
    vector_t* parentQueryVectorPtr = argPtr->parentQueryVectorPtr;
    long numTotalParent = argPtr->numTotalParent;
    float basePenalty = argPtr->basePenalty;
    float baseLogLikelihood = argPtr->baseLogLikelihood;
    bitmap_t* invalidBitmapPtr = argPtr->bitmapPtr;
    queue_t* workQueuePtr = argPtr->workQueuePtr;
    vector_t* baseParentQueryVectorPtr = argPtr->aQueryVectorPtr;
    vector_t* baseQueryVectorPtr = argPtr->bQueryVectorPtr;

    bool_t status;
    adtree_t* adtreePtr = learnerPtr->adtreePtr;
    net_t* netPtr = learnerPtr->netPtr;
    float* localBaseLogLikelihoods = learnerPtr->localBaseLogLikelihoods;

    TMpopulateParentQueryVector(TM_ARG netPtr, toId, queries, parentQueryVectorPtr);

    /*
     * Create base query and parentQuery
     */
    status = PVECTOR_COPY(baseParentQueryVectorPtr, parentQueryVectorPtr);
    assert(status);

    status = PVECTOR_COPY(baseQueryVectorPtr, baseParentQueryVectorPtr);
    assert(status);
    status = PVECTOR_PUSHBACK(baseQueryVectorPtr, (void*)&queries[toId]);
    assert(status);
    /* BUGFIX: sort the base query vector that was just built (the code
     * previously sorted the still-unpopulated queryVectorPtr; harmless
     * only because queryVectorPtr is re-sorted after each rebuild below). */
    PVECTOR_SORT(baseQueryVectorPtr, &compareQuery);

    /*
     * Search all possible valid operations for better local log likelihood
     */

    /* BUGFIX: bestFromId holds a variable id; it was declared float,
     * which loses precision for large ids and is inconsistent with
     * TMfindBestReverseTask, which uses long. */
    long bestFromId = toId; /* flag for not found */
    float oldLocalLogLikelihood =
        (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]);
    float bestLocalLogLikelihood = oldLocalLogLikelihood;

    /* Mark toId's descendants invalid: an edge from any of them would
     * create a cycle. */
    status = TMNET_FINDDESCENDANTS(netPtr, toId, invalidBitmapPtr, workQueuePtr);
    assert(status);
    long fromId = -1;
    list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, toId);
    long maxNumEdgeLearned = global_maxNumEdgeLearned;

    if ((maxNumEdgeLearned < 0) ||
        (TMLIST_GETSIZE(parentIdListPtr) <= maxNumEdgeLearned))
    {
        list_iter_t it;
        TMLIST_ITER_RESET(&it, parentIdListPtr);
        while (TMLIST_ITER_HASNEXT(&it, parentIdListPtr)) {
            long parentId = (long)TMLIST_ITER_NEXT(&it, parentIdListPtr);
            bitmap_set(invalidBitmapPtr, parentId); /* invalid since already have edge */
        }

        while ((fromId = bitmap_findClear(invalidBitmapPtr, (fromId + 1))) >= 0) {
            if (fromId == toId) {
                continue;
            }

            /* Joint query = base query + candidate parent, sorted by index */
            status = PVECTOR_COPY(queryVectorPtr, baseQueryVectorPtr);
            assert(status);
            status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[fromId]);
            assert(status);
            PVECTOR_SORT(queryVectorPtr, &compareQuery);

            status = PVECTOR_COPY(parentQueryVectorPtr, baseParentQueryVectorPtr);
            assert(status);
            status = PVECTOR_PUSHBACK(parentQueryVectorPtr, (void*)&queries[fromId]);
            assert(status);
            PVECTOR_SORT(parentQueryVectorPtr, &compareQuery);

            float newLocalLogLikelihood =
                computeLocalLogLikelihood(toId,
                                          adtreePtr,
                                          netPtr,
                                          queries,
                                          queryVectorPtr,
                                          parentQueryVectorPtr);

            if (newLocalLogLikelihood > bestLocalLogLikelihood) {
                bestLocalLogLikelihood = newLocalLogLikelihood;
                bestFromId = fromId;
            }

        } /* foreach valid parent */

    } /* if have not exceeded max number of edges to learn */

    /*
     * Return best task; Note: if none is better, fromId will equal toId
     */
    learner_task_t bestTask;
    bestTask.op = OPERATION_INSERT;
    bestTask.fromId = bestFromId;
    bestTask.toId = toId;
    bestTask.score = 0.0;

    if (bestFromId != toId) {
        long numRecord = adtreePtr->numRecord;
        long numParent = TMLIST_GETSIZE(parentIdListPtr) + 1;
        float penalty =
            (numTotalParent + numParent * global_insertPenalty) * basePenalty;
        float logLikelihood = numRecord * (baseLogLikelihood +
                                           + bestLocalLogLikelihood
                                           - oldLocalLogLikelihood);
        float bestScore = penalty + logLikelihood;
        bestTask.score = bestScore;
    }

    return bestTask;
}
#ifdef LEARNER_TRY_REMOVE
/* =============================================================================
* TMfindBestRemoveTask
* =============================================================================
*/
/*
 * Finds the parent edge (fromId -> toId) whose removal most improves the
 * local log likelihood of argPtr->toId. Returns a task whose fromId ==
 * toId when no removal helps.
 */
static learner_task_t
TMfindBestRemoveTask (TM_ARGDECL findBestTaskArg_t* argPtr)
{
    long toId = argPtr->toId;
    learner_t* learnerPtr = argPtr->learnerPtr;
    query_t* queries = argPtr->queries;
    vector_t* queryVectorPtr = argPtr->queryVectorPtr;
    vector_t* parentQueryVectorPtr = argPtr->parentQueryVectorPtr;
    long numTotalParent = argPtr->numTotalParent;
    float basePenalty = argPtr->basePenalty;
    float baseLogLikelihood = argPtr->baseLogLikelihood;
    vector_t* origParentQueryVectorPtr = argPtr->aQueryVectorPtr;

    bool_t status;
    adtree_t* adtreePtr = learnerPtr->adtreePtr;
    net_t* netPtr = learnerPtr->netPtr;
    float* localBaseLogLikelihoods = learnerPtr->localBaseLogLikelihoods;

    TMpopulateParentQueryVector(TM_ARG
                                netPtr, toId, queries, origParentQueryVectorPtr);
    long numParent = PVECTOR_GETSIZE(origParentQueryVectorPtr);

    /*
     * Search all possible valid operations for better local log likelihood
     */

    /* BUGFIX: bestFromId holds a variable id; declaring it float lost
     * precision for large ids (TMfindBestReverseTask already uses long). */
    long bestFromId = toId; /* flag for not found */
    float oldLocalLogLikelihood =
        (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]);
    float bestLocalLogLikelihood = oldLocalLogLikelihood;

    long i;
    for (i = 0; i < numParent; i++) {

        query_t* queryPtr = (query_t*)PVECTOR_AT(origParentQueryVectorPtr, i);
        long fromId = queryPtr->index;

        /*
         * Create parent query (subset of parents since remove an edge)
         */
        PVECTOR_CLEAR(parentQueryVectorPtr);
        long p;
        for (p = 0; p < numParent; p++) {
            /* NOTE(review): this compares the position p against the
             * variable id fromId, so the removed parent is only skipped
             * when ids happen to coincide with positions — confirm
             * against the reference implementation (p != i may be the
             * intended test). Left as-is to preserve behavior. */
            if (p != fromId) {
                query_t* queryPtr = (query_t*)PVECTOR_AT(origParentQueryVectorPtr, p);
                status = PVECTOR_PUSHBACK(parentQueryVectorPtr,
                                          (void*)&queries[queryPtr->index]);
                assert(status);
            }
        } /* create new parent query */

        /*
         * Create query
         */
        status = PVECTOR_COPY(queryVectorPtr, parentQueryVectorPtr);
        assert(status);
        status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[toId]);
        assert(status);
        PVECTOR_SORT(queryVectorPtr, &compareQuery);

        /*
         * See if removing parent is better
         */
        float newLocalLogLikelihood =
            computeLocalLogLikelihood(toId,
                                      adtreePtr,
                                      netPtr,
                                      queries,
                                      queryVectorPtr,
                                      parentQueryVectorPtr);

        if (newLocalLogLikelihood > bestLocalLogLikelihood) {
            bestLocalLogLikelihood = newLocalLogLikelihood;
            bestFromId = fromId;
        }

    } /* for each parent */

    /*
     * Return best task; Note: if none is better, fromId will equal toId
     */
    learner_task_t bestTask;
    bestTask.op = OPERATION_REMOVE;
    bestTask.fromId = bestFromId;
    bestTask.toId = toId;
    bestTask.score = 0.0;

    if (bestFromId != toId) {
        long numRecord = adtreePtr->numRecord;
        float penalty = (numTotalParent - 1) * basePenalty;
        float logLikelihood = numRecord * (baseLogLikelihood +
                                           + bestLocalLogLikelihood
                                           - oldLocalLogLikelihood);
        float bestScore = penalty + logLikelihood;
        bestTask.score = bestScore;
    }

    return bestTask;
}
#endif /* LEARNER_TRY_REMOVE */
#ifdef LEARNER_TRY_REVERSE
/* =============================================================================
* TMfindBestReverseTask
* =============================================================================
*/
static learner_task_t
TMfindBestReverseTask (TM_ARGDECL  findBestTaskArg_t* argPtr)
{
    /*
     * For the target variable toId, evaluate reversing each existing edge
     * (parent -> toId) into (toId -> parent), and return the reversal with the
     * best combined local log-likelihood. Runs inside a transaction (TM_*).
     * If no reversal improves the score, the returned task has fromId == toId.
     */
    long toId = argPtr->toId;
    learner_t* learnerPtr = argPtr->learnerPtr;
    query_t* queries = argPtr->queries;
    vector_t* queryVectorPtr = argPtr->queryVectorPtr;
    vector_t* parentQueryVectorPtr = argPtr->parentQueryVectorPtr;
    long numTotalParent = argPtr->numTotalParent;
    float basePenalty = argPtr->basePenalty;
    float baseLogLikelihood = argPtr->baseLogLikelihood;
    bitmap_t* visitedBitmapPtr = argPtr->bitmapPtr;
    queue_t* workQueuePtr = argPtr->workQueuePtr;
    /* a/b scratch vectors hold the original parent sets of toId and fromId */
    vector_t* toOrigParentQueryVectorPtr = argPtr->aQueryVectorPtr;
    vector_t* fromOrigParentQueryVectorPtr = argPtr->bQueryVectorPtr;

    bool_t status;

    adtree_t* adtreePtr = learnerPtr->adtreePtr;
    net_t* netPtr = learnerPtr->netPtr;
    float* localBaseLogLikelihoods = learnerPtr->localBaseLogLikelihoods;

    /* Snapshot toId's current parents; each one is a reversal candidate */
    TMpopulateParentQueryVector(TM_ARG
                                netPtr, toId, queries, toOrigParentQueryVectorPtr);
    long numParent = PVECTOR_GETSIZE(toOrigParentQueryVectorPtr);

    /*
     * Search all possible valid operations for better local log likelihood
     */

    long bestFromId = toId; /* flag for not found */
    float oldLocalLogLikelihood =
        (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]);
    float bestLocalLogLikelihood = oldLocalLogLikelihood;

    long fromId = 0;
    long i;
    for (i = 0; i < numParent; i++) {

        query_t* queryPtr = (query_t*)PVECTOR_AT(toOrigParentQueryVectorPtr, i);
        fromId = queryPtr->index;

        /*
         * Baseline for this candidate: sum of the two nodes' current local
         * likelihoods (both change when the edge is reversed). Note this
         * re-seeds bestLocalLogLikelihood every iteration, so each candidate
         * is compared against its own two-node baseline.
         */
        bestLocalLogLikelihood =
            oldLocalLogLikelihood +
            (float)TM_SHARED_READ_F(localBaseLogLikelihoods[fromId]);

        TMpopulateParentQueryVector(TM_ARG
                                    netPtr,
                                    fromId,
                                    queries,
                                    fromOrigParentQueryVectorPtr);

        /*
         * Create parent query (subset of parents since remove an edge)
         */

        PVECTOR_CLEAR(parentQueryVectorPtr);

        long p;
        for (p = 0; p < numParent; p++) {
            /*
             * NOTE(review): this compares the vector POSITION p against the
             * variable id fromId. The apparent intent is to skip the parent
             * being reversed, i.e. `p != i` (or queryPtr->index != fromId).
             * Positions and variable ids only coincide by accident — confirm
             * against the reference implementation before changing.
             */
            if (p != fromId) {
                query_t* queryPtr = (query_t*)PVECTOR_AT(toOrigParentQueryVectorPtr, p);
                status = PVECTOR_PUSHBACK(parentQueryVectorPtr,
                                          (void*)&queries[queryPtr->index]);
                assert(status);
            }
        } /* create new parent query */

        /*
         * Create query (parents plus toId itself, sorted by variable index)
         */

        status = PVECTOR_COPY(queryVectorPtr, parentQueryVectorPtr);
        assert(status);
        status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[toId]);
        assert(status);
        PVECTOR_SORT(queryVectorPtr, &compareQuery);

        /*
         * Get log likelihood for removing parent from toId
         */

        float newLocalLogLikelihood =
            computeLocalLogLikelihood(toId,
                                      adtreePtr,
                                      netPtr,
                                      queries,
                                      queryVectorPtr,
                                      parentQueryVectorPtr);

        /*
         * Get log likelihood for adding parent to fromId
         */

        status = PVECTOR_COPY(parentQueryVectorPtr, fromOrigParentQueryVectorPtr);
        assert(status);
        status = PVECTOR_PUSHBACK(parentQueryVectorPtr, (void*)&queries[toId]);
        assert(status);
        PVECTOR_SORT(parentQueryVectorPtr, &compareQuery);

        status = PVECTOR_COPY(queryVectorPtr, parentQueryVectorPtr);
        assert(status);
        status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[fromId]);
        assert(status);
        PVECTOR_SORT(queryVectorPtr, &compareQuery);

        newLocalLogLikelihood +=
            computeLocalLogLikelihood(fromId,
                                      adtreePtr,
                                      netPtr,
                                      queries,
                                      queryVectorPtr,
                                      parentQueryVectorPtr);

        /*
         * Record best
         */

        if (newLocalLogLikelihood > bestLocalLogLikelihood) {
            bestLocalLogLikelihood = newLocalLogLikelihood;
            bestFromId = fromId;
        }

    } /* for each parent */

    /*
     * Check validity of best: temporarily remove the edge and make sure
     * no other path fromId -> toId remains (the reversed edge toId -> fromId
     * would then close a cycle). The edge is re-inserted either way.
     */

    if (bestFromId != toId) {
        bool_t isTaskValid = TRUE;
        TMNET_APPLYOPERATION(netPtr, OPERATION_REMOVE, bestFromId, toId);
        if (TMNET_ISPATH(netPtr,
                         bestFromId,
                         toId,
                         visitedBitmapPtr,
                         workQueuePtr))
        {
            isTaskValid = FALSE;
        }
        TMNET_APPLYOPERATION(netPtr, OPERATION_INSERT, bestFromId, toId);
        if (!isTaskValid) {
            bestFromId = toId;
        }
    }

    /*
     * Return best task; Note: if none is better, fromId will equal toId
     */

    learner_task_t bestTask;
    bestTask.op     = OPERATION_REVERSE;
    bestTask.fromId = bestFromId;
    bestTask.toId   = toId;
    bestTask.score  = 0.0;

    if (bestFromId != toId) {
        float fromLocalLogLikelihood =
            (float)TM_SHARED_READ_F(localBaseLogLikelihoods[bestFromId]);
        long numRecord = adtreePtr->numRecord;
        /* Parent count is unchanged by a reversal, so penalty uses it as-is */
        float penalty = numTotalParent * basePenalty;
        /* Score = penalty + N * (base - old two-node terms + new combined term).
         * The stray unary '+' before bestLocalLogLikelihood is harmless. */
        float logLikelihood = numRecord * (baseLogLikelihood +
                                           + bestLocalLogLikelihood
                                           - oldLocalLogLikelihood
                                           - fromLocalLogLikelihood);
        float bestScore = penalty + logLikelihood;
        bestTask.score = bestScore;
    }

    return bestTask;
}
#endif /* LEARNER_TRY_REVERSE */
/* =============================================================================
* learnStructure
*
* Note it is okay if the score is not exact, as we are relaxing the greedy
* search. This means we do not need to communicate baseLogLikelihood across
* threads.
* =============================================================================
*/
/*
 * learnStructure
 *
 * Worker thread body for the greedy hill-climbing structure search. Each
 * thread repeatedly pops a task (insert/remove/reverse an edge), re-validates
 * it against the current graph (another thread may have invalidated it),
 * applies it, updates the affected local log-likelihoods, and then searches
 * for the next best operation on the same target variable.
 *
 * Fix vs. previous revision: the TEST_LEARNER debug printouts had lost their
 * printf(...) call lines, leaving dangling argument lists that do not compile
 * when TEST_LEARNER is defined. The calls are restored (format strings
 * reconstructed from the arguments; confirm against the reference source).
 */
static void
learnStructure (void* argPtr)
{
    TM_THREAD_ENTER();

    learner_t* learnerPtr = (learner_t*)argPtr;
    net_t* netPtr = learnerPtr->netPtr;
    adtree_t* adtreePtr = learnerPtr->adtreePtr;
    long numRecord = adtreePtr->numRecord;
    float* localBaseLogLikelihoods = learnerPtr->localBaseLogLikelihoods;
    list_t* taskListPtr = learnerPtr->taskListPtr;

    float operationQualityFactor = global_operationQualityFactor;

    /* Per-thread scratch structures for cycle checks */
    bitmap_t* visitedBitmapPtr = PBITMAP_ALLOC(learnerPtr->adtreePtr->numVar);
    assert(visitedBitmapPtr);
    queue_t* workQueuePtr = PQUEUE_ALLOC(-1);
    assert(workQueuePtr);

    /* One wildcard query per variable; value is narrowed during scoring */
    long numVar = adtreePtr->numVar;
    query_t* queries = (query_t*)P_MALLOC(numVar * sizeof(query_t));
    assert(queries);
    long v;
    for (v = 0; v < numVar; v++) {
        queries[v].index = v;
        queries[v].value = QUERY_VALUE_WILDCARD;
    }

    /* MDL-style penalty per parent: -0.5 * ln(numRecord) */
    float basePenalty = (float)(-0.5 * log((double)numRecord));

    vector_t* queryVectorPtr = PVECTOR_ALLOC(1);
    assert(queryVectorPtr);
    vector_t* parentQueryVectorPtr = PVECTOR_ALLOC(1);
    assert(parentQueryVectorPtr);
    vector_t* aQueryVectorPtr = PVECTOR_ALLOC(1);
    assert(aQueryVectorPtr);
    vector_t* bQueryVectorPtr = PVECTOR_ALLOC(1);
    assert(bQueryVectorPtr);

    /* Shared argument record passed to the TMfindBest*Task helpers */
    findBestTaskArg_t arg;
    arg.learnerPtr           = learnerPtr;
    arg.queries              = queries;
    arg.queryVectorPtr       = queryVectorPtr;
    arg.parentQueryVectorPtr = parentQueryVectorPtr;
    arg.bitmapPtr            = visitedBitmapPtr;
    arg.workQueuePtr         = workQueuePtr;
    arg.aQueryVectorPtr      = aQueryVectorPtr;
    arg.bQueryVectorPtr      = bQueryVectorPtr;

    /* Wait for all threads; createTaskList must have finished first */
    thread_barrier_wait();

    while (1) {

        learner_task_t* taskPtr;
        TM_BEGIN();
        taskPtr = TMpopTask(TM_ARG  taskListPtr);
        TM_END();
        if (taskPtr == NULL) {
            break;
        }

        operation_t op = taskPtr->op;
        long fromId = taskPtr->fromId;
        long toId = taskPtr->toId;

        bool_t isTaskValid;

        TM_BEGIN();

        /*
         * Check if task is still valid (concurrent edits may have created the
         * edge already, or made the operation introduce a cycle)
         */
        isTaskValid = TRUE;
        switch (op) {
            case OPERATION_INSERT: {
                if (TMNET_HASEDGE(netPtr, fromId, toId) ||
                    TMNET_ISPATH(netPtr,
                                 toId,
                                 fromId,
                                 visitedBitmapPtr,
                                 workQueuePtr))
                {
                    isTaskValid = FALSE;
                }
                break;
            }
            case OPERATION_REMOVE: {
                /* Can never create cycle, so always valid */
                break;
            }
            case OPERATION_REVERSE: {
                /* Temporarily remove edge for check */
                TMNET_APPLYOPERATION(netPtr, OPERATION_REMOVE, fromId, toId);
                if (TMNET_ISPATH(netPtr,
                                 fromId,
                                 toId,
                                 visitedBitmapPtr,
                                 workQueuePtr))
                {
                    isTaskValid = FALSE;
                }
                TMNET_APPLYOPERATION(netPtr, OPERATION_INSERT, fromId, toId);
                break;
            }
            default:
                assert(0);
        }

#ifdef TEST_LEARNER
        /* Restored printf call (format string reconstructed) */
        printf("[task] op=%i from=%li to=%li score=%lf valid=%s\n",
               taskPtr->op, taskPtr->fromId, taskPtr->toId, taskPtr->score,
               (isTaskValid ? "yes" : "no"));
        fflush(stdout);
#endif

        /*
         * Perform task: update graph and probabilities
         */

        if (isTaskValid) {
            TMNET_APPLYOPERATION(netPtr, op, fromId, toId);
        }

        TM_END();

        float deltaLogLikelihood = 0.0;

        if (isTaskValid) {

            switch (op) {
                float newBaseLogLikelihood;
                case OPERATION_INSERT: {
                    TM_BEGIN();
                    TMpopulateQueryVectors(TM_ARG
                                           netPtr,
                                           toId,
                                           queries,
                                           queryVectorPtr,
                                           parentQueryVectorPtr);
                    newBaseLogLikelihood =
                        computeLocalLogLikelihood(toId,
                                                  adtreePtr,
                                                  netPtr,
                                                  queries,
                                                  queryVectorPtr,
                                                  parentQueryVectorPtr);
                    float toLocalBaseLogLikelihood =
                        (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]);
                    deltaLogLikelihood +=
                        toLocalBaseLogLikelihood - newBaseLogLikelihood;
                    TM_SHARED_WRITE_F(localBaseLogLikelihoods[toId],
                                      newBaseLogLikelihood);
                    TM_END();
                    TM_BEGIN();
                    long numTotalParent = (long)TM_SHARED_READ_L(learnerPtr->numTotalParent);
                    TM_SHARED_WRITE_L(learnerPtr->numTotalParent, (numTotalParent + 1));
                    TM_END();
                    break;
                }
#ifdef LEARNER_TRY_REMOVE
                case OPERATION_REMOVE: {
                    TM_BEGIN();
                    TMpopulateQueryVectors(TM_ARG
                                           netPtr,
                                           fromId,
                                           queries,
                                           queryVectorPtr,
                                           parentQueryVectorPtr);
                    newBaseLogLikelihood =
                        computeLocalLogLikelihood(fromId,
                                                  adtreePtr,
                                                  netPtr,
                                                  queries,
                                                  queryVectorPtr,
                                                  parentQueryVectorPtr);
                    float fromLocalBaseLogLikelihood =
                        (float)TM_SHARED_READ_F(localBaseLogLikelihoods[fromId]);
                    deltaLogLikelihood +=
                        fromLocalBaseLogLikelihood - newBaseLogLikelihood;
                    TM_SHARED_WRITE_F(localBaseLogLikelihoods[fromId],
                                      newBaseLogLikelihood);
                    TM_END();
                    TM_BEGIN();
                    long numTotalParent = (long)TM_SHARED_READ_L(learnerPtr->numTotalParent);
                    TM_SHARED_WRITE_L(learnerPtr->numTotalParent, (numTotalParent - 1));
                    TM_END();
                    break;
                }
#endif /* LEARNER_TRY_REMOVE */
#ifdef LEARNER_TRY_REVERSE
                case OPERATION_REVERSE: {
                    /* Reversal changes both endpoints' local likelihoods */
                    TM_BEGIN();
                    TMpopulateQueryVectors(TM_ARG
                                           netPtr,
                                           fromId,
                                           queries,
                                           queryVectorPtr,
                                           parentQueryVectorPtr);
                    newBaseLogLikelihood =
                        computeLocalLogLikelihood(fromId,
                                                  adtreePtr,
                                                  netPtr,
                                                  queries,
                                                  queryVectorPtr,
                                                  parentQueryVectorPtr);
                    float fromLocalBaseLogLikelihood =
                        (float)TM_SHARED_READ_F(localBaseLogLikelihoods[fromId]);
                    deltaLogLikelihood +=
                        fromLocalBaseLogLikelihood - newBaseLogLikelihood;
                    TM_SHARED_WRITE_F(localBaseLogLikelihoods[fromId],
                                      newBaseLogLikelihood);
                    TM_END();

                    TM_BEGIN();
                    TMpopulateQueryVectors(TM_ARG
                                           netPtr,
                                           toId,
                                           queries,
                                           queryVectorPtr,
                                           parentQueryVectorPtr);
                    newBaseLogLikelihood =
                        computeLocalLogLikelihood(toId,
                                                  adtreePtr,
                                                  netPtr,
                                                  queries,
                                                  queryVectorPtr,
                                                  parentQueryVectorPtr);
                    float toLocalBaseLogLikelihood =
                        (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]);
                    deltaLogLikelihood +=
                        toLocalBaseLogLikelihood - newBaseLogLikelihood;
                    TM_SHARED_WRITE_F(localBaseLogLikelihoods[toId],
                                      newBaseLogLikelihood);
                    TM_END();
                    break;
                }
#endif /* LEARNER_TRY_REVERSE */
                default:
                    assert(0);
            } /* switch op */

        } /* if isTaskValid */

        /*
         * Update/read globals. It is okay if the score is not exact (the
         * greedy search is relaxed), so these need not be atomic with the
         * per-node updates above.
         */

        float baseLogLikelihood;
        long numTotalParent;
        TM_BEGIN();
        float oldBaseLogLikelihood =
            (float)TM_SHARED_READ_F(learnerPtr->baseLogLikelihood);
        float newBaseLogLikelihood = oldBaseLogLikelihood + deltaLogLikelihood;
        TM_SHARED_WRITE_F(learnerPtr->baseLogLikelihood, newBaseLogLikelihood);
        baseLogLikelihood = newBaseLogLikelihood;
        numTotalParent = (long)TM_SHARED_READ_L(learnerPtr->numTotalParent);
        TM_END();

        /*
         * Find next task for this target variable
         */

        float baseScore = ((float)numTotalParent * basePenalty)
                          + (numRecord * baseLogLikelihood);

        learner_task_t bestTask;
        bestTask.op     = NUM_OPERATION;
        bestTask.toId   = -1;
        bestTask.fromId = -1;
        bestTask.score  = baseScore;

        learner_task_t newTask;

        arg.toId              = toId;
        arg.numTotalParent    = numTotalParent;
        arg.basePenalty       = basePenalty;
        arg.baseLogLikelihood = baseLogLikelihood;

        TM_BEGIN();
        newTask = TMfindBestInsertTask(TM_ARG  &arg);
        TM_END();
        if ((newTask.fromId != newTask.toId) &&
            (newTask.score > (bestTask.score / operationQualityFactor)))
        {
            bestTask = newTask;
        }

#ifdef LEARNER_TRY_REMOVE
        TM_BEGIN();
        newTask = TMfindBestRemoveTask(TM_ARG  &arg);
        TM_END();
        if ((newTask.fromId != newTask.toId) &&
            (newTask.score > (bestTask.score / operationQualityFactor)))
        {
            bestTask = newTask;
        }
#endif /* LEARNER_TRY_REMOVE */

#ifdef LEARNER_TRY_REVERSE
        TM_BEGIN();
        newTask = TMfindBestReverseTask(TM_ARG  &arg);
        TM_END();
        if ((newTask.fromId != newTask.toId) &&
            (newTask.score > (bestTask.score / operationQualityFactor)))
        {
            bestTask = newTask;
        }
#endif /* LEARNER_TRY_REVERSE */

        if (bestTask.toId != -1) {
            learner_task_t* tasks = learnerPtr->tasks;
            tasks[toId] = bestTask;
            TM_BEGIN();
            TMLIST_INSERT(taskListPtr, (void*)&tasks[toId]);
            TM_END();
#ifdef TEST_LEARNER
            /* Restored printf call (format string reconstructed) */
            printf("[new]  op=%i from=%li to=%li score=%lf\n",
                   bestTask.op, bestTask.fromId, bestTask.toId, bestTask.score);
            fflush(stdout);
#endif
        }

    } /* while (tasks) */

    PBITMAP_FREE(visitedBitmapPtr);
    PQUEUE_FREE(workQueuePtr);
    PVECTOR_FREE(bQueryVectorPtr);
    PVECTOR_FREE(aQueryVectorPtr);
    PVECTOR_FREE(queryVectorPtr);
    PVECTOR_FREE(parentQueryVectorPtr);
    P_FREE(queries);

    TM_THREAD_EXIT();
}
/* =============================================================================
* learner_run
* -- Call adtree_make before this
* =============================================================================
*/
void
learner_run (learner_t* learnerPtr)
{
    /*
     * Run the two-phase structure learner: first every thread builds the
     * initial task list (createTaskList), then every thread runs the greedy
     * search (learnStructure). The phases must not overlap.
     */
#ifdef OTM
    /* OpenMP-transactional build: each phase is its own parallel region,
     * whose implicit barrier separates the phases. */
#pragma omp parallel
    {
        createTaskList((void*)learnerPtr);
    }
#pragma omp parallel
    {
        learnStructure((void*)learnerPtr);
    }
#else
    /* Pthread build: thread_start runs the function on all worker threads
     * and returns only when they are done. */
    thread_start(&createTaskList, (void*)learnerPtr);
    thread_start(&learnStructure, (void*)learnerPtr);
#endif
}
/* =============================================================================
* learner_score
* -- Score entire network
* =============================================================================
*/
float
learner_score (learner_t* learnerPtr)
{
    /*
     * Score the entire learned network (sequential; call after learner_run).
     * Score = MDL penalty for the total parent count
     *         + numRecord * sum of per-variable local log-likelihoods.
     */
    adtree_t* adtreePtr = learnerPtr->adtreePtr;
    net_t* netPtr = learnerPtr->netPtr;
    long numVar = adtreePtr->numVar;

    /* Scratch vectors reused for every variable */
    vector_t* queryVectorPtr = vector_alloc(1);
    assert(queryVectorPtr);
    vector_t* parentQueryVectorPtr = vector_alloc(1);
    assert(parentQueryVectorPtr);

    /* One wildcard query per variable */
    query_t* queries = (query_t*)SEQ_MALLOC(numVar * sizeof(query_t));
    assert(queries);
    long varId;
    for (varId = 0; varId < numVar; varId++) {
        queries[varId].index = varId;
        queries[varId].value = QUERY_VALUE_WILDCARD;
    }

    /* Accumulate parent count and total log-likelihood over all variables */
    long numTotalParent = 0;
    float logLikelihood = 0.0;
    for (varId = 0; varId < numVar; varId++) {
        list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, varId);
        numTotalParent += list_getSize(parentIdListPtr);
        populateQueryVectors(netPtr,
                             varId,
                             queries,
                             queryVectorPtr,
                             parentQueryVectorPtr);
        logLikelihood += computeLocalLogLikelihood(varId,
                                                   adtreePtr,
                                                   netPtr,
                                                   queries,
                                                   queryVectorPtr,
                                                   parentQueryVectorPtr);
    }

    vector_free(queryVectorPtr);
    vector_free(parentQueryVectorPtr);
    SEQ_FREE(queries);

    long numRecord = adtreePtr->numRecord;
    float penalty =
        (float)(-0.5 * (double)numTotalParent * log((double)numRecord));
    return penalty + numRecord * logLikelihood;
}
/* #############################################################################
* TEST_LEARNER
* #############################################################################
*/
#ifdef TEST_LEARNER
#include <stdio.h>
static void
testPartition (long min, long max, long n)
{
    /* Print the [start, stop] range createPartition assigns to each of the
     * n chunks of the interval [min, max]. */
    printf("min=%li max=%li, n=%li\n", min, max, n);
    long chunk;
    for (chunk = 0; chunk < n; chunk++) {
        long lo;
        long hi;
        createPartition(min, max, chunk, n, &lo, &hi);
        printf("%li: %li -> %li\n", chunk, lo, hi);
    }
    puts("");
}
int
main (int argc, char* argv[])
{
    /*
     * Smoke test for the learner: exercise createPartition, then learn a
     * structure on small random data and score it. Single-threaded.
     */
    thread_startup(1);

    puts("Starting...");

    testPartition(0, 4, 8);
    testPartition(0, 15, 8);
    testPartition(3, 103, 7);

    long numVar = 56;
    long numRecord = 256;

    random_t* randomPtr = random_alloc();
    data_t* dataPtr = data_alloc(numVar, numRecord, randomPtr);
    assert(dataPtr);
    data_generate(dataPtr, 0, 10, 10);

    adtree_t* adtreePtr = adtree_alloc();
    assert(adtreePtr);
    adtree_make(adtreePtr, dataPtr);

    learner_t* learnerPtr = learner_alloc(dataPtr, adtreePtr, 1);
    assert(learnerPtr);

    /* Raw data no longer needed once the AD-tree is built */
    data_free(dataPtr);

    learner_run(learnerPtr);

    /* Greedy search must never produce a cyclic network */
    assert(!net_isCycle(learnerPtr->netPtr));

    float score = learner_score(learnerPtr);
    printf("score = %lf\n", score);

    learner_free(learnerPtr);

    puts("Done.");

    adtree_free(adtreePtr);
    random_free(randomPtr);

    thread_shutdown();

    return 0;
}
#endif /* TEST_LEARNER */
/* =============================================================================
*
* End of learner.h
*
* =============================================================================
*/
|
apply_op.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void apply_op(domain_type * domain, int level, int Ax_id, int x_id, double a, double b){  // y=Ax or y=D^{-1}Ax = lambda[]Ax
  // Apply the variable-coefficient Helmholtz operator to grid x_id, storing
  // the result in grid Ax_id:  Ax = a*alpha*x - b*div(beta*grad(x)).

  // exchange the boundary of x in preparation for Ax
  // note, Ax (one power) with a 7-point stencil requires only the faces
  exchange_boundary(domain,level,x_id,1,0,0);

  // now do Ax proper...
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
    CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread either ACROSS boxes (many small boxes) or WITHIN one box (few
  // large boxes); exactly one of the two if() clauses enables its pragma.
  // NOTE(review): the inner pragma is a nested parallel region — it only
  // gains threads if nested parallelism is enabled; confirm runtime config.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i <  CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;

  #pragma omp parallel for private(box) if(omp_across_boxes)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k,s;
    int pencil = domain->subdomains[box].levels[level].pencil;   // j-stride (elements)
    int  plane = domain->subdomains[box].levels[level].plane;    // k-stride (elements)
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int  dim_k = domain->subdomains[box].levels[level].dim.k;
    int  dim_j = domain->subdomains[box].levels[level].dim.j;
    int  dim_i = domain->subdomains[box].levels[level].dim.i;
    double h2inv = 1.0/(domain->h[level]*domain->h[level]);
    // Offset all grid pointers so [0] is the first non-ghost-zone point
    double * __restrict__ x      = domain->subdomains[box].levels[level].grids[  x_id] + ghosts*(1+pencil+plane);
    double * __restrict__ Ax     = domain->subdomains[box].levels[level].grids[ Ax_id] + ghosts*(1+pencil+plane);
    double * __restrict__ alpha  = domain->subdomains[box].levels[level].grids[__alpha ] + ghosts*(1+pencil+plane);
    double * __restrict__ beta_i = domain->subdomains[box].levels[level].grids[__beta_i] + ghosts*(1+pencil+plane);
    double * __restrict__ beta_j = domain->subdomains[box].levels[level].grids[__beta_j] + ghosts*(1+pencil+plane);
    double * __restrict__ beta_k = domain->subdomains[box].levels[level].grids[__beta_k] + ghosts*(1+pencil+plane);
    double * __restrict__ lambda = domain->subdomains[box].levels[level].grids[__lambda] + ghosts*(1+pencil+plane);

    #pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
    for(k=0;k<dim_k;k++){
     for(j=0;j<dim_j;j++){
      for(i=0;i<dim_i;i++){
        int ijk = i + j*pencil + k*plane;
        // 7-point flux-difference stencil: face-centered betas scale the
        // differences to the six neighbors
        double helmholtz =  a*alpha[ijk]*x[ijk] - b*h2inv*(
                              beta_i[ijk+1     ]*( x[ijk+1     ]-x[ijk       ] )
                             -beta_i[ijk       ]*( x[ijk       ]-x[ijk-1     ] )
                             +beta_j[ijk+pencil]*( x[ijk+pencil]-x[ijk       ] )
                             -beta_j[ijk       ]*( x[ijk       ]-x[ijk-pencil] )
                             +beta_k[ijk+plane ]*( x[ijk+plane ]-x[ijk       ] )
                             -beta_k[ijk       ]*( x[ijk       ]-x[ijk-plane ] )
                            );
        Ax[ijk] = helmholtz;
    }}}
  }
  domain->cycles.apply_op[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
|
GB_binop__ne_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_uint64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__ne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint64)
// A*D function (colscale): GB (_AxD__ne_uint64)
// D*A function (rowscale): GB (_DxB__ne_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint64)
// C=scalar+B GB (_bind1st__ne_uint64)
// C=scalar+B' GB (_bind1st_tran__ne_uint64)
// C=A+scalar GB (_bind2nd__ne_uint64)
// C=A'+scalar GB (_bind2nd_tran__ne_uint64)
// C type: bool
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT64 || GxB_NO_NE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, B are all dense; cij = (aij != bij). Auto-generated
// wrapper: the loop body lives in the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; caller falls back to generic
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (dense accum). For NE there is no accum form (#if 0), so this is a
// no-op stub kept for the uniform generated interface.
GrB_Info GB (_Cdense_accumB__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, scalar accumulated into a dense matrix. Disabled (#if 0) for the
// NE operator; stub kept for the uniform generated interface.
GrB_Info GB (_Cdense_accumb__ne_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; cij = (aij != djj).
GrB_Info GB (_AxD__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C output is bool for the NE comparison operator
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; cij = (dii != bij).
GrB_Info GB (_DxB__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C output is bool for the NE comparison operator
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (union of patterns), optionally masked; cij = (aij != bij)
// where both entries are present.
GrB_Info GB (_AaddB__ne_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces used (and freed) by the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (intersection of patterns), optionally masked.
GrB_Info GB (_AemultB_01__ne_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// NE is commutative (GB_BINOP_FLIP == 0), so the flipped branch compiles out.
GrB_Info GB (_AemultB_02__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__ne_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for all entries p: binary op with the scalar bound
// to the first argument. Bb is the bitmap pattern of B (NULL if full).
GrB_Info GB (_bind1st__ne_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for all entries p: binary op with the scalar bound
// to the second argument. Ab is the bitmap pattern of A (NULL if full).
GrB_Info GB (_bind2nd__ne_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name);
// used by the transpose template included below
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint64_t aij = Ax [pA] ;        \
    Cx [pC] = (x != aij) ;          \
}

// C = op (x, A'): transpose A and apply the op with scalar bound first.
GrB_Info GB (_bind1st_tran__ne_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of the translation unit
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y), no typecasting (in spite of the macro name);
// used by the transpose template included below
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint64_t aij = Ax [pA] ;        \
    Cx [pC] = (aij != y) ;          \
}

// C = op (A', y): transpose A and apply the op with scalar bound second.
GrB_Info GB (_bind2nd_tran__ne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
proposed.c |
#ifdef _OPENMP
#include <omp.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <R.h>
#include <math.h>
#include <Rmath.h>
#include <Rinternals.h>
#include <R_ext/Lapack.h>
#include <R_ext/BLAS.h>
#include <R_ext/Utils.h>
/*
 * proposed: projection-based test statistic plus a multiplier-bootstrap null
 * distribution (presumably a two-sample test — confirm against the R caller).
 *
 * Inputs (R objects):
 *   X_r (n x p), Y_r (m x p)  : data matrices
 *   N_r (len x p)             : projection directions, one per row
 *   e_r (length M*n)          : multiplier weights for the bootstrap
 *   n_r, m_r, p_r, len_r, M_r : dimensions
 * Outputs (written in place):
 *   stats_r[0] : observed statistic, max over projections, scaled by
 *                sqrt(n/(m+n))
 *   T1_r[q]    : bootstrap statistic for replicate q (q = 0..M-1)
 * Returns R_NilValue.
 *
 * Fixes vs. previous revision:
 *   - Rprintf('openMP not used\n') used single quotes (a multi-character int
 *     constant, not a string) and lacked a semicolon, so non-OpenMP builds
 *     did not compile — and even if they had, T/T1 were never computed.
 *     The loops now always run; only the pragmas are conditional (pragmas
 *     are ignored by non-OpenMP compilers anyway).
 *   - GetRNGstate() is now paired with PutRNGstate() as required by the
 *     R extension API.
 *   - Removed unused locals (info, np, md).
 */
SEXP proposed(SEXP X_r, SEXP Y_r, SEXP n_r, SEXP m_r, SEXP p_r, SEXP N_r, SEXP len_r, SEXP e_r, SEXP M_r, SEXP stats_r, SEXP T1_r){

    const int incOne = 1;
    const char ntran = 'N';
    const double one = 1.0;
    const double zero = 0.0;

    int n = INTEGER(n_r)[0];
    int m = INTEGER(m_r)[0];
    int p = INTEGER(p_r)[0];
    int len = INTEGER(len_r)[0];
    int M = INTEGER(M_r)[0];

    double *X = REAL(X_r);
    double *Y = REAL(Y_r);
    double *N = REAL(N_r);
    double *e = REAL(e_r);
    double *T1 = REAL(T1_r);
    double *stats = REAL(stats_r);

    int i, q;
    const int d = 4;   /* number of cosine basis functions per projection */

    /* R-managed scratch (freed by R at end of .Call) */
    double *T = (double *) S_alloc(len, sizeof(double));

    GetRNGstate();

    /* ---- Observed statistic: one value per projection direction ---- */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for(i = 0; i < len; i++){
        /* per-iteration scratch; malloc'd so each thread owns its own */
        double *X0   = (double*) malloc(sizeof(double) * n);
        double *Y0   = (double*) malloc(sizeof(double) * m);
        double *u    = (double*) malloc(sizeof(double) * p);
        double *Z    = (double*) malloc(sizeof(double) * m);
        double *Phi1 = (double*) malloc(sizeof(double) * d);
        double counter;
        int j, k;

        /* u = i-th projection direction (row i of N, stored column-major) */
        for(j = 0; j < p; j++) u[j] = N[j*len + i];
        F77_NAME(dgemv)(&ntran, &n, &p, &one, X, &n, u, &incOne, &zero, X0, &incOne); /* X0 = X*u */
        F77_NAME(dgemv)(&ntran, &m, &p, &one, Y, &m, u, &incOne, &zero, Y0, &incOne); /* Y0 = Y*u */

        /* Empirical CDF of X0 evaluated at Y0, pushed through cosine basis */
        for(k = 0; k < d; k++) Phi1[k] = 0.0;
        for(j = 0; j < m; j++){
            counter = 0.0;
            for(k = 0; k < n; k++) if(X0[k] <= Y0[j]) counter++;
            Z[j] = counter/n;
            for(k = 0; k < d; k++) Phi1[k] += sqrt(2.0)*cos(Z[j]*PI*(k+1));
        }

        /* Max |Phi1| over the basis, normalized */
        T[i] = fabs(Phi1[0]);
        for(k = 1; k < d; k++) if(fabs(Phi1[k]) > T[i]) T[i] = fabs(Phi1[k]);
        T[i] /= sqrt(m);

        free(X0);
        free(Y0);
        free(u);
        free(Z);
        free(Phi1);
    }

    /* Observed statistic: max over projections, scaled */
    stats[0] = T[0];
    for(i = 1; i < len; i++) if(T[i] > stats[0]) stats[0] = T[i];
    stats[0] *= sqrt(n/(m+n+0.0));

    /* ---- Bootstrap null distribution: one replicate per column of e ---- */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for(q = 0; q < M; q++){
        double *X1    = (double*) malloc(sizeof(double) * n);
        double *u1    = (double*) malloc(sizeof(double) * p);
        double *Z1    = (double*) malloc(sizeof(double) * n);
        double *Phi11 = (double*) malloc(sizeof(double) * d);
        double *T0    = (double*) malloc(sizeof(double) * len);
        double counter1;
        int i0, j0, k0;

        for(i0 = 0; i0 < len; i0++){
            for(j0 = 0; j0 < p; j0++) u1[j0] = N[j0*len + i0];
            F77_NAME(dgemv)(&ntran, &n, &p, &one, X, &n, u1, &incOne, &zero, X1, &incOne); /* X1 = X*u */

            /* Same construction as above, but on X alone, weighted by the
             * multipliers e[q*n + j0] for replicate q */
            for(k0 = 0; k0 < d; k0++) Phi11[k0] = 0.0;
            for(j0 = 0; j0 < n; j0++){
                counter1 = 0.0;
                for(k0 = 0; k0 < n; k0++) if(X1[k0] <= X1[j0]) counter1++;
                Z1[j0] = counter1/n;
                for(k0 = 0; k0 < d; k0++)
                    Phi11[k0] += e[q*n + j0]*sqrt(2.0)*cos(Z1[j0]*PI*(k0+1));
            }

            T0[i0] = fabs(Phi11[0]);
            for(k0 = 1; k0 < d; k0++) if(fabs(Phi11[k0]) > T0[i0]) T0[i0] = fabs(Phi11[k0]);
            T0[i0] /= sqrt(n);

            /* running max over projections for this replicate */
            if(i0 == 0) T1[q] = T0[i0];
            else if(T0[i0] > T1[q]) T1[q] = T0[i0];
        }

        free(X1);
        free(u1);
        free(Z1);
        free(Phi11);
        free(T0);
    }

    PutRNGstate();
    return(R_NilValue);
}
|
private_universe.h | #ifndef PRIVATE_UNIVERSE__H
#define PRIVATE_UNIVERSE__H
#include "radiance.h"
#include "table.h"
#include "stack_memory.h"
#include <algorithm>
#include <cstring>
#include <limits>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include <mutex>
#define LOG_VAR(var) std::cout << #var << " = " << var << std::endl
namespace radiance {
const static char NAMESPACE_DELIMETER = '/';
// Thin implementation object bound to a single Collection; it stores the
// back-pointer supplied at construction and hands it back via collection().
class CollectionImpl {
 public:
  CollectionImpl(Collection* collection) : collection_(collection) {}

  // Returns the Collection this impl was created for.
  inline Collection* collection() {
    return collection_;
  }

 private:
  Collection* collection_;
};
// Registry of collections keyed by "program/collection" names.
class CollectionRegistry {
public:
// Creates and registers a new collection under "program/collection".
// Returns the new collection, or nullptr when the name is already taken.
// NOTE(review): existing names yield nullptr, not the existing collection —
// confirm callers expect that.
Collection* add(const char* program, const char* collection) {
std::string name = std::string(program) + NAMESPACE_DELIMETER + std::string(collection);
Collection* ret = nullptr;
if (collections_.find(name) == -1) {
Handle id = collections_.insert(name, nullptr);
// NOTE(review): new_collection stores the caller's `collection` pointer
// as the name without copying; the string must outlive the registry.
ret = new_collection(id, collection);
collections_[id] = ret;
}
return ret;
}
// Registers the collection known as `source` under the additional name
// `dest`. NOTE(review): returns ALREADY_EXISTS both when dest is taken AND
// when source does not exist — the two failures are indistinguishable.
Status::Code share(const char* source, const char* dest) {
Handle src = collections_.find(source);
Handle dst = collections_.find(dest);
if (src != -1 && dst == -1) {
collections_.insert(dest, collections_[src]);
return Status::OK;
}
return Status::ALREADY_EXISTS;
}
// Looks up a collection by its full "program/collection" name; nullptr if
// absent.
Collection* get(const char* name) {
Collection* ret = nullptr;
Handle id;
if ((id = collections_.find(name)) != -1) {
ret = collections_[id];
}
return ret;
}
// Convenience overload: joins program and collection with the delimiter.
Collection* get(const char* program, const char* collection) {
std::string name = std::string(program) + NAMESPACE_DELIMETER + std::string(collection);
return get(name.data());
}
// Downcasts a Collection's opaque self pointer to its implementation.
inline static CollectionImpl* to_impl(Collection* c) {
return (CollectionImpl*)c->self;
}
private:
// Allocates a zeroed Collection and writes its const id/name/self fields
// through const-casts (the public struct exposes them read-only).
Collection* new_collection(Id id, const char* name) {
Collection* c = (Collection*)malloc(sizeof(Collection));
memset(c, 0, sizeof(Collection));
*(Id*)(&c->id) = id;
*(char**)(&c->name) = (char*)name;
*(void**)(&c->self) = new CollectionImpl(c);
return c;
}
Table<std::string, Collection*> collections_;
};
class PipelineImpl {
private:
Pipeline* pipeline_;
std::vector<Collection*> sources_;
std::vector<Collection*> sinks_;
public:
PipelineImpl(Pipeline* pipeline) : pipeline_(pipeline) {}
void add_source(Collection* source) {
if (source) {
if (std::find(sources_.begin(), sources_.end(), source) == sources_.end()) {
sources_.push_back(source);
}
}
}
void add_sink(Collection* sink) {
if (sink) {
if (std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end()) {
sinks_.push_back(sink);
}
}
}
void run() {
size_t source_size = sources_.size();
size_t sink_size = sinks_.size();
if (source_size == 1 && sink_size == 1) {
run_1_to_1();
} else if (source_size == 1 && sink_size == 0) {
run_1_to_0();
}
}
void run_1_to_0() {
Collection* source = sources_[0];
uint64_t count = source->count(source);
#pragma omp parallel for
for(uint64_t i = 0; i < count; ++i) {
thread_local static Stack stack;
source->copy(
source->keys.data + source->keys.offset + i * source->keys.size,
source->values.data + source->values.offset + i * source->values.size,
i, &stack);
pipeline_->transform(&stack);
stack.clear();
}
}
void run_1_to_1() {
Collection* source = sources_[0];
Collection* sink = sinks_[0];
uint64_t count = source->count(source);
#pragma omp parallel for
for(uint64_t i = 0; i < count; ++i) {
thread_local static Stack stack;
source->copy(
source->keys.data + source->keys.offset + i * source->keys.size,
source->values.data + source->values.offset + i * source->values.size,
i, &stack);
pipeline_->transform(&stack);
sink->mutate(sink, (const Mutation*)stack.top());
stack.clear();
}
}
void run_m_to_n() {
Stack stack;
std::unordered_map<uint8_t*, std::vector<uint8_t*>> joined;
// Naive Hash-Join implementation.
uint64_t min_count = std::numeric_limits<uint64_t>::max();
Collection* min_collection = nullptr;
for (Collection* c : sources_) {
if (c->count(c) < min_count) {
min_collection = c;
}
}
{
uint8_t* keys = min_collection->keys.data + min_collection->keys.offset;
uint8_t* values = min_collection->values.data + min_collection->values.offset;
for(uint64_t i = 0; i < min_count; ++i) {
joined[keys].push_back(values);
keys += min_collection->keys.size;
values += min_collection->values.size;
}
}
for (Collection* c : sources_) {
if (c == min_collection) continue;
if (c == sources_.back()) continue;
uint8_t* keys = min_collection->keys.data + min_collection->keys.offset;
uint8_t* values = min_collection->values.data + min_collection->values.offset;
for(uint64_t i = 0; i < min_count; ++i) {
joined[keys].push_back(values);
keys += c->keys.size;
values += c->values.size;
}
}
{
Collection* c = sources_.back();
uint8_t* keys = c->keys.data + c->keys.offset;
uint8_t* values = c->values.data + c->values.offset;
for(uint64_t i = 0; i < min_count; ++i) {
auto joined_values = joined[keys];
joined_values.push_back(values);
if (joined_values.size() == sources_.size()) {
}
keys += c->keys.size;
values += c->values.size;
}
}
}
};
// Implementation state behind a Program: owns its pipelines and tracks which
// pipelines read (readers_) and write (writers_) each collection.
class ProgramImpl {
 private:
  // Maps a collection to the set of pipelines that touch it.
  typedef Table<Collection*, std::set<Pipeline*>> Mutators;

 public:
  ProgramImpl(Program* program) : program_(program) {}

  // Creates a pipeline wired to the given (possibly null) source and sink.
  Pipeline* add_pipeline(Collection* source, Collection* sink) {
    Pipeline* pipeline = new_pipeline(pipelines_.size());
    pipelines_.push_back(pipeline);
    add_source(pipeline, source);
    add_sink(pipeline, sink);
    return pipeline;
  }

  // "Removal" currently only disables the pipeline; it stays in pipelines_.
  Status::Code remove_pipeline(struct Pipeline* pipeline) {
    return disable_pipeline(pipeline);
  }

  // Re-registers the pipeline under the trigger named by the policy.
  Status::Code enable_pipeline(struct Pipeline* pipeline, ExecutionPolicy policy) {
    disable_pipeline(pipeline);
    if (policy.trigger == Trigger::LOOP) {
      // loop_pipelines_ is kept sorted so disable can binary-search it.
      loop_pipelines_.push_back(pipeline);
      std::sort(loop_pipelines_.begin(), loop_pipelines_.end());
    } else if (policy.trigger == Trigger::EVENT) {
      event_pipelines_.insert(pipeline);
    } else {
      return Status::UNKNOWN_TRIGGER_POLICY;
    }
    return Status::OK;
  }

  // Removes the pipeline from both trigger registries (no-op if absent).
  Status::Code disable_pipeline(struct Pipeline* pipeline) {
    auto found = std::lower_bound(loop_pipelines_.begin(), loop_pipelines_.end(), pipeline);
    // BUG FIX: lower_bound returns the insertion position, not a match.
    // Without the equality check, disabling a pipeline that was never
    // LOOP-enabled erased an unrelated pipeline from the loop list.
    if (found != loop_pipelines_.end() && *found == pipeline) {
      loop_pipelines_.erase(found);
    }
    event_pipelines_.erase(pipeline);
    return Status::OK;
  }

  // Wires a source collection into the pipeline and records the read edge.
  Status::Code add_source(Pipeline* pipeline, Collection* source) {
    ((PipelineImpl*)(pipeline->self))->add_source(source);
    return add_pipeline_to_collection(pipeline, source, &readers_);
  }

  // Wires a sink collection into the pipeline and records the write edge.
  Status::Code add_sink(Pipeline* pipeline, Collection* sink) {
    ((PipelineImpl*)(pipeline->self))->add_sink(sink);
    return add_pipeline_to_collection(pipeline, sink, &writers_);
  }

  bool contains_pipeline(Pipeline* pipeline) {
    return std::find(pipelines_.begin(), pipelines_.end(), pipeline) != pipelines_.end();
  }

  // Runs every LOOP-triggered pipeline once, in sorted (pointer) order.
  void run() {
    for (Pipeline* p : loop_pipelines_) {
      ((PipelineImpl*)p->self)->run();
    }
  }

 private:
  // Allocates a zeroed Pipeline and writes its const id/program fields
  // through const-casts (the public struct exposes them read-only).
  Pipeline* new_pipeline(Id id) {
    Pipeline* p = (Pipeline*)malloc(sizeof(Pipeline));
    memset(p, 0, sizeof(Pipeline));
    *(Id*)(&p->id) = id;
    *(Id*)(&p->program) = program_->id;
    p->self = new PipelineImpl(p);
    return p;
  }

  // Records pipeline in table[collection], creating the entry on demand.
  Status::Code add_pipeline_to_collection(
      Pipeline* pipeline, Collection* collection, Mutators* table) {
    if (!pipeline || !collection || !table) {
      return Status::NULL_POINTER;
    }
    if (!contains_pipeline(pipeline)) {
      return Status::DOES_NOT_EXIST;
    }
    Handle handle = table->find(collection);
    if (handle == -1) {
      handle = table->insert(collection, {});
    }
    std::set<Pipeline*>& pipelines = (*table)[handle];
    if (pipelines.find(pipeline) == pipelines.end()) {
      pipelines.insert(pipeline);
    } else {
      return Status::ALREADY_EXISTS;
    }
    return Status::OK;
  }

  Program* program_;
  Mutators writers_;
  Mutators readers_;
  std::vector<Pipeline*> pipelines_;
  std::vector<Pipeline*> loop_pipelines_;
  std::set<Pipeline*> event_pipelines_;
};
// Owns Program instances keyed by name and forwards pipeline wiring
// requests to the owning program's implementation.
class ProgramRegistry {
 public:
  // Creates the named program if it does not exist yet; returns its id
  // either way.
  Id create_program(const char* program) {
    Id id = programs_.find(program);
    if (id != -1) {
      return id;
    }
    id = programs_.insert(program, nullptr);
    Program* created = new_program(id, program);
    created->self = new ProgramImpl{created};
    programs_[id] = created;
    return id;
  }

  // Wires a source collection into the pipeline via its owning program.
  Status::Code add_source(Pipeline* pipeline, Collection* collection) {
    return to_impl(programs_[pipeline->program])->add_source(pipeline, collection);
  }

  // Wires a sink collection into the pipeline via its owning program.
  Status::Code add_sink(Pipeline* pipeline, Collection* collection) {
    return to_impl(programs_[pipeline->program])->add_sink(pipeline, collection);
  }

  // Looks a program up by name; nullptr when unknown.
  Program* get_program(const char* program) {
    Handle id = programs_.find(program);
    return (id == -1) ? nullptr : programs_[id];
  }

  // Looks a program up by id (assumed valid).
  Program* get_program(Id id) {
    return programs_[id];
  }

  // Downcasts a Program's opaque self pointer to its implementation.
  inline ProgramImpl* to_impl(Program* p) {
    return (ProgramImpl*)(p->self);
  }

 private:
  // Allocates a zeroed Program and writes its const id/name fields through
  // const-casts.  NOTE(review): stores the caller's name pointer without
  // copying — the string must outlive the Program; confirm callers do.
  Program* new_program(Id id, const char* name) {
    Program* p = (Program*)malloc(sizeof(Program));
    memset(p, 0, sizeof(Program));
    *(Id*)(&p->id) = id;
    *(char**)(&p->name) = (char*)name;
    return p;
  }

  Table<std::string, Program*> programs_;
};
// Facade declared here, defined elsewhere: owns the collection and program
// registries and gates operations on a run-state machine.
class PrivateUniverse {
public:
// Lifecycle states; transition() enforces the allowed moves between them.
enum class RunState {
ERROR = -1,
UNKNOWN = 0,
INITIALIZED,
STARTED,
RUNNING,
STOPPED,
};
PrivateUniverse();
~PrivateUniverse();
// Lifecycle control (implementations not visible in this header).
Status::Code init();
Status::Code start();
Status::Code stop();
Status::Code loop();
Id create_program(const char* name);
// Pipeline manipulation.
struct Pipeline* add_pipeline(const char* program, const char* source, const char* sink);
struct Pipeline* copy_pipeline(struct Pipeline* pipeline, const char* dest);
Status::Code remove_pipeline(struct Pipeline* pipeline);
Status::Code enable_pipeline(struct Pipeline* pipeline, ExecutionPolicy policy);
Status::Code disable_pipeline(struct Pipeline* pipeline);
// Collection manipulation.
Collection* add_collection(const char* program, const char* collection);
Status::Code add_source(Pipeline*, const char* collection);
Status::Code add_sink(Pipeline*, const char* collection);
Status::Code share_collection(const char* source, const char* dest);
Status::Code copy_collection(const char* source, const char* dest);
private:
// Checks the current run state against the allowed state(s) before moving
// to `next`.
Status::Code transition(RunState allowed, RunState next);
Status::Code transition(std::vector<RunState>&& allowed, RunState next);
CollectionRegistry collections_;
ProgramRegistry programs_;
RunState run_state_;
};
} // namespace radiance
#endif // PRIVATE_UNIVERSE__H
|
7900.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the deterministic pattern (i + j) / nj so runs are
   reproducible without any input file. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr so dead-code elimination cannot drop the kernel; a
   newline is emitted every 20 values for readability. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 3x3 weighted stencil (2D convolution) over the
   interior of A, written to B.  The whole function is timed, call and
   return included.
   BUG FIX: the previous OpenMP pragmas contained unexpanded autotuner
   placeholders (collapse(#P12), schedule(#P9, #P11), num_threads(#P11),
   "target teams distribute #p #p"), which are not valid OpenMP syntax and
   break compilation; they also stacked two `parallel for` directives on one
   loop.  Replaced with a single well-formed parallel-for over rows. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /* Rows are independent, so parallelize the outer loop; j must be private
     because it is declared outside the parallel region. */
#pragma omp parallel for private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
        + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
        + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Problem size. */
  int ni = NI;
  int nj = NJ;

  /* Input and output arrays. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Time the kernel, call and return included. */
  polybench_timer_start();
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  polybench_timer_stop();
  polybench_timer_print();

  /* Prevent dead-code elimination: all live-out data must be printed by the
     function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
|
parallel_macros.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2016, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// Author: Enrico Siragusa <enrico.siragusa@fu-berlin.de>
// ==========================================================================
// Utility macros for parallelism.
// ==========================================================================
#ifndef SEQAN_PARALLEL_PARALLEL_MACROS_H_
#define SEQAN_PARALLEL_PARALLEL_MACROS_H_
/*!
* @macro SEQAN_OMP_PRAGMA
* @headerfile <seqan/parallel.h>
* @brief Portable conditional <tt>#pragma</tt> issuing if OpenMP is enabled.
*
* @signature SEQAN_OMP_PRAGMA(x)
*
* @param x The string to issue behind <tt>#pragma omp</tt>.
*
* @section Remarks
*
* This macro uses portable pragma generation, dependent on the macro <tt>_OPENMP</tt> being defined (as by
* the OpenMP standard).
*
* This is useful for disabling OpenMP pragmas on compilers that do not support OpenMP or when OpenMP is not enabled to
* suppress warnings.
*
* @section Example
*
* Parallelize loop with OpenMP if OpenMP is enabled:
*
* @code{.cpp}
* SEQAN_OMP_PRAGMA(parallel for) // becomes: #pragma omp parallel for
* for (int i = 0; i < x; ++i)
* {
* // Do work.
* }
* @endcode
*
* Make an addition atomic if OpenMP is enabled:
*
* @code{.cpp}
 * SEQAN_OMP_PRAGMA(atomic) // becomes: #pragma omp atomic
* i += 1;
* @endcode
*/
#ifdef _OPENMP
#include <omp.h>
#if defined(PLATFORM_GCC)
// GCC _Pragma operator
#define SEQAN_DO_PRAGMA(x) _Pragma(# x)
#define SEQAN_OMP_PRAGMA(x) SEQAN_DO_PRAGMA(omp x)
#else // #if defined(PLATFORM_GCC)
// MSVC __pragma-operator
#define SEQAN_OMP_PRAGMA(x) __pragma(omp x)
#endif // #if defined(PLATFORM_GCC)
#else // #ifdef _OPENMP
// OpenMP disabled: the pragma macro expands to nothing so annotated code
// compiles unchanged as serial code.
#define SEQAN_OMP_PRAGMA(x)
// low-level OpenMP runtime compatibility
// Single-threaded stand-ins for the omp_* runtime API so callers may use
// these functions unconditionally: one thread, id 0.
inline void omp_set_num_threads(int)
{}
inline int omp_get_num_threads()
{
return 1;
}
inline int omp_get_max_threads()
{
return 1;
}
inline int omp_get_thread_num()
{
return 0;
}
// NOTE(review): relies on seqan::sysTime() having been declared by an
// earlier include — confirm the include order guarantees this.
inline double omp_get_wtime()
{
return seqan::sysTime();
}
#endif // #ifdef _OPENMP
// ----------------------------------------------------------------------------
// Function getThreadId()
// ----------------------------------------------------------------------------
// Returns the calling thread's id: the global linear thread index in CUDA
// device code, the OpenMP thread number on the host, and 0 in serial builds.
SEQAN_HOST_DEVICE inline unsigned getThreadId()
{
#if defined(__CUDA_ARCH__)
return blockIdx.x * blockDim.x + threadIdx.x;
#elif defined(_OPENMP)
return omp_get_thread_num();
#else
return 0;
#endif
}
#endif // SEQAN_PARALLEL_PARALLEL_MACROS_H_
|
CalcBlowingSnow.c | /******************************************************************************
* @section DESCRIPTION
*
* Calculate energy of sublimation from blowing snow.
*****************************************************************************/
#include <vic_run.h>
/******************************************************************************
* @brief Calculate sublimation from blowing snow
*****************************************************************************/
double
CalcBlowingSnow(double Dt,
double Tair,
unsigned LastSnow,
double SurfaceLiquidWater,
double Wind,
double Ls,
double AirDens,
double EactAir,
double ZO,
double Zrh,
double snowdepth,
double lag_one,
double sigma_slope,
double Tsnow,
int iveg,
int Nveg,
double fe,
double displacement,
double roughness,
double *TotalTransport)
{
/*
 * Sublimation flux from blowing snow, integrated over a wind-speed
 * probability distribution when BLOWING_SPATIAL_WIND is enabled, or
 * evaluated at the single grid-cell wind otherwise.
 *
 * Dt        - time step [s] (divided by SEC_PER_HOUR below)
 * Tair      - air temperature [deg C] (CONST_TKFRZ added to get Kelvin)
 * LastSnow  - time steps since last snowfall (snow age = LastSnow*Dt)
 * Wind      - wind speed measured 2 m above the snow surface
 * iveg/Nveg - iveg == Nveg marks the bare-soil tile (see below)
 * Returns the total sublimation (Total); blowing-snow transport is
 * returned through *TotalTransport.
 */
extern parameters_struct param;
extern option_struct options;
/* Local variables: */
double Age;
double U10, Uo, prob_occurence;
double es, Ros, F;
double SubFlux;
double Diffusivity;
double ushear;
double Tk;
double utshear;
int p;
double upper, lower, Total;
double area;
double sigma_w;
double Zo_salt;
double ratio, wind10;
double Uveg, hv, Nd;
double Transport;
/*******************************************************************/
/* Calculate some general variables, that don't depend on wind speed. */
/* Age in hours */
Age = LastSnow * Dt / SEC_PER_HOUR;
/* Saturation density of water vapor, Liston A-8 */
es = svp(Tair);
Tk = Tair + CONST_TKFRZ;
Ros = CONST_EPS * es / (CONST_RDAIR * Tk);
/* Diffusivity in m2/s, Liston eq. A-7 */
Diffusivity = (2.06e-5) * pow(Tk / 273., 1.75);
// Essery et al. 1999, eq. 6 (m*s/kg)
/* NOTE(review): the cited formula is usually written with a factor
   (Ls/(Rv*Tk) - 1); here the factor is (Ls*Tk/CONST_RDAIR - 1), i.e.
   temperature multiplied instead of divided — verify against upstream. */
F = (Ls / (param.BLOWING_KA * Tk)) * (Ls * Tk / CONST_RDAIR - 1.);
F += 1. / (Diffusivity * Ros);
/* grid cell 10 m wind speed = 50th percentile wind */
/* Wind speed at 2 m above snow was passed to this function. */
wind10 = Wind * log(10. / ZO) / log((2 + ZO) / ZO);
/* Check for bare soil case. */
if (iveg == Nveg) {
fe = 1500;
sigma_slope = .0002;
}
// sigma_w/uo:
ratio = (2.44 - (0.43) * lag_one) * sigma_slope;
sigma_w = wind10 * ratio;
Uo = wind10;
/*********** Parameters for roughness above snow. *****************/
hv = (3. / 2.) * displacement;
Nd = (4. / 3.) * (roughness / displacement);
/*******************************************************************/
/** Begin loop through wind probability function. */
Total = 0.0;
*TotalTransport = 0.0;
area = 1. / (double) param.BLOWING_NUMINCS;
if (snowdepth > 0.0) {
/* Spatial wind branch: integrate the flux over BLOWING_NUMINCS
   equal-probability wind intervals of a double-exponential
   distribution centered on Uo with scale sigma_w. */
if (options.BLOWING_SPATIAL_WIND && sigma_w != 0.) {
for (p = 0; p < param.BLOWING_NUMINCS; p++) {
SubFlux = lower = upper = 0.0;
/* Find the limits of integration. */
if (p == 0) {
lower = -9999;
upper = Uo + sigma_w * log(2. * (p + 1) * area);
}
else if (p > 0 && p < param.BLOWING_NUMINCS / 2) {
lower = Uo + sigma_w * log(2. * (p) * area);
upper = Uo + sigma_w * log(2. * (p + 1) * area);
}
else if (p < (param.BLOWING_NUMINCS - 1) && p >=
(double) param.BLOWING_NUMINCS / 2) {
lower = Uo - sigma_w * log(2. - 2. * (p * area));
upper = Uo - sigma_w * log(2. - 2. * ((p + 1.) * area));
}
else if (p == param.BLOWING_NUMINCS - 1) {
lower = Uo - sigma_w * log(2. - 2. * (p * area));
upper = 9999;
}
if (lower > upper) { /* Could happen if lower > Uo*2 */
lower = upper;
log_err("Error with probability boundaries");
}
/* Find expected value of wind speed for the interval. */
U10 = Uo;
if (lower >= Uo) {
U10 = -0.5 *
((upper +
sigma_w) * exp((-1. / sigma_w) * (upper - Uo)) -
(lower +
sigma_w) *
exp((-1. / sigma_w) * (lower - Uo))) / area;
}
else if (upper <= Uo) {
U10 = 0.5 *
((upper -
sigma_w) * exp((1. / sigma_w) * (upper - Uo)) -
(lower -
sigma_w) *
exp((1. / sigma_w) * (lower - Uo))) / area;
}
else {
log_err("Problem with probability ranges: Increment = %d, "
"integration limits = %f - %f", p, upper, lower);
}
/* Clamp the interval wind to a physically sensible range. */
if (U10 < 0.4) {
U10 = .4;
}
if (U10 > 25.) {
U10 = 25.;
}
/*******************************************************************/
/* Calculate parameters for probability of blowing snow occurence. */
/* ( Li and Pomeroy 1997) */
if (snowdepth < hv) {
Uveg = U10 / sqrt(1. + 170 * Nd * (hv - snowdepth));
}
else {
Uveg = U10;
}
prob_occurence = get_prob(Tair, Age, SurfaceLiquidWater, Uveg);
/*******************************************************************/
/* Calculate threshold shear stress. Send 0 for constant or */
/* 1 for variable threshold after Li and Pomeroy (1997) */
utshear =
get_thresh(Tair, SurfaceLiquidWater, ZO);
/* Iterate to find actual shear stress during saltation. */
shear_stress(U10, ZO, &ushear, &Zo_salt, utshear);
if (ushear > utshear) {
SubFlux = CalcSubFlux(EactAir, es, Zrh, AirDens, utshear,
ushear, fe, Tsnow,
Tair, U10, Zo_salt, F, &Transport);
}
else {
SubFlux = 0.0;
Transport = 0.0;
}
/* Accumulate the probability-weighted interval contributions. */
Total += (1. / (double) param.BLOWING_NUMINCS) * SubFlux *
prob_occurence;
*TotalTransport += (1. / (double) param.BLOWING_NUMINCS) *
Transport * prob_occurence;
}
}
else {
/* Single-wind branch: evaluate once at the grid-cell 10 m wind. */
U10 = Uo;
/*******************************************************************/
/* Calculate parameters for probability of blowing snow occurence. */
/* ( Li and Pomeroy 1997) */
if (snowdepth < hv) {
Uveg = U10 / sqrt(1. + 170 * Nd * (hv - snowdepth));
}
else {
Uveg = U10;
}
prob_occurence = get_prob(Tair, Age, SurfaceLiquidWater, Uveg);
/*******************************************************************/
/* Calculate threshold shear stress. Send 0 for constant or */
/* 1 for variable threshold after Li and Pomeroy (1997) */
utshear = get_thresh(Tair, SurfaceLiquidWater, ZO);
/* Iterate to find actual shear stress during saltation. */
shear_stress(Uo, ZO, &ushear, &Zo_salt, utshear);
if (ushear > utshear) {
SubFlux = CalcSubFlux(EactAir, es, Zrh, AirDens, utshear,
ushear, fe, Tsnow,
Tair, Uo, Zo_salt, F, &Transport);
}
else {
SubFlux = 0.0;
Transport = 0.0;
}
Total = SubFlux * prob_occurence;
*TotalTransport = Transport * prob_occurence;
}
}
/* Floor the (negative, i.e. sublimating) total at a small magnitude. */
if (Total < -.00005) {
Total = -.00005;
}
return Total;
}
/******************************************************************************
* @brief Integration is performed by Romberg's method: Numerical Recipes
* in C Section 4.3
*****************************************************************************/
double
qromb(double (*funcd)(),
double es,
double Wind,
double AirDens,
double ZO,
double EactAir,
double F,
double hsalt,
double phi_r,
double ushear,
double Zrh,
double a,
double b)
{
/* Romberg integration of funcd over [a, b]; the remaining arguments are
   passed straight through to funcd/trapzd.  Numerical Recipes sec. 4.3,
   with 1-indexed arrays as in the original. */
extern parameters_struct param;
double ss, dss;
/* s[j] holds successive trapezoid estimates, h[j] the step-size
   parameter; h needs one extra slot because h[j+1] is written at
   j == BLOWING_MAX_ITER. */
double s[param.BLOWING_MAX_ITER + 1];
double h[param.BLOWING_MAX_ITER + 2];
int j;
h[1] = 1.0;
for (j = 1; j <= param.BLOWING_MAX_ITER; j++) {
s[j] = trapzd(funcd, es, Wind, AirDens, ZO, EactAir, F, hsalt, phi_r,
ushear, Zrh, a, b, j);
/* Once BLOWING_K stages exist, extrapolate h -> 0 through the last
   BLOWING_K points; dss estimates the extrapolation error. */
if (j >= param.BLOWING_K) {
polint(&h[j - param.BLOWING_K], &s[j - param.BLOWING_K],
param.BLOWING_K, 0.0, &ss, &dss);
if (fabs(dss) <= DBL_EPSILON * fabs(ss)) {
return ss;
}
}
h[j + 1] = 0.25 * h[j];
}
/* No convergence within BLOWING_MAX_ITER stages (log_err aborts). */
log_err("Too many steps");
}
/******************************************************************************
* @brief Interpolate a set of N points by fitting a polynomial of degree N-1
*****************************************************************************/
void
polint(double xa[],
double ya[],
int n,
double x,
double *y,
double *dy)
{
/* Neville's-algorithm polynomial interpolation (Numerical Recipes
   sec. 3.1): fits the n points (xa[1..n], ya[1..n]) — note the
   1-indexing — and evaluates at x.  *y receives the value, *dy the
   error estimate from the last correction term. */
int i, m, ns;
double den, dif, dift, ho, hp, w;
double *c = NULL;
double *d = NULL;
ns = 1;
dif = fabs(x - xa[1]);
/* n+1 elements so indices 1..n are valid. */
c = (double *)malloc((size_t) ((n + 1) * sizeof(double)));
check_alloc_status(c, "Memory allocation error.");
d = (double *)malloc((size_t) ((n + 1) * sizeof(double)));
check_alloc_status(d, "Memory allocation error.");
/* Find the table entry closest to x and seed the c/d difference rows. */
for (i = 1; i <= n; i++) {
if ((dift = fabs(x - xa[i])) < dif) {
ns = i;
dif = dift;
}
c[i] = ya[i];
d[i] = ya[i];
}
*y = ya[ns--];
/* Work through the Neville tableau, accumulating corrections into *y. */
for (m = 1; m < n; m++) {
for (i = 1; i <= n - m; i++) {
ho = xa[i] - x;
hp = xa[i + m] - x;
w = c[i + 1] - d[i];
/* Two identical abscissae would divide by zero (log_err aborts). */
if ((den = ho - hp) == 0.0) {
log_err("interpolation error");
}
den = w / den;
d[i] = hp * den;
c[i] = ho * den;
}
*y += (*dy = (2 * ns < (n - m) ? c[ns + 1] : d[ns--]));
}
free(d);
free(c);
}
/******************************************************************************
* @brief Compute the nth stage of refinement of an extended trapezoidal rule.
*****************************************************************************/
double
trapzd(double (*funcd)(),
double es,
double Wind,
double AirDens,
double ZO,
double EactAir,
double F,
double hsalt,
double phi_r,
double ushear,
double Zrh,
double a,
double b,
int n)
{
/* Stage n of the extended trapezoidal rule over [a, b] (Numerical
   Recipes sec. 4.2).  Stage 1 initializes the running estimate; each
   later stage adds 2^(n-2) interior points.  Callers must invoke stages
   in order n = 1, 2, ... because state is carried in `s`. */
double x, tnm, sum, del;
int it, j;
// TODO: remove use of static variables (see GH #735), for now:
// make static variables thread safe
static double s;
#pragma omp threadprivate(s)
if (n == 1) {
return (s = 0.5 *
(b -
a) *
((*funcd)(a, es, Wind, AirDens, ZO, EactAir, F, hsalt,
phi_r, ushear, Zrh) +
(*funcd)(b, es, Wind, AirDens, ZO, EactAir, F, hsalt,
phi_r, ushear, Zrh)));
}
else {
/* it = 2^(n-2): number of new points added at this stage. */
for (it = 1, j = 1; j < n - 1; j++) {
it <<= 1;
}
tnm = it;
del = (b - a) / tnm;
x = a + 0.5 * del;
for (sum = 0.0, j = 1; j <= it; j++, x += del) {
sum +=
(*funcd)(x, es, Wind, AirDens, ZO, EactAir, F, hsalt, phi_r,
ushear, Zrh);
}
/* Refine the previous estimate with the new interior points. */
s = 0.5 * (s + (b - a) * sum / tnm);
return s;
}
}
/******************************************************************************
* @brief Newton-Raphson method.
*****************************************************************************/
/* Safeguarded Newton-Raphson root finder (Numerical Recipes' rtsafe) for
   the shear-stress residual get_shear.  Finds the root of f bracketed by
   [x1, x2] to accuracy acc; Ur and Zr are passed through to get_shear.
   Falls back to bisection whenever the Newton step would leave the current
   bracket or converges too slowly.
   BUG FIX: the out-of-bracket test compared against x1 (the ORIGINAL
   endpoint) instead of xl (the CURRENT lower bracket) — "(rts - x1)" was a
   typo for "(rts - xl)", as in Numerical Recipes' rtsafe.  With x1, the
   test used a stale bound once the bracket had shrunk, allowing Newton
   steps outside the bracket. */
double
rtnewt(double x1,
       double x2,
       double acc,
       double Ur,
       double Zr)
{
    extern parameters_struct param;

    int j;
    double df, dx, dxold, f, fh, fl;
    double temp, xh, xl, rts;

    /* Evaluate the residual at both ends and require a sign change. */
    get_shear(x1, &fl, &df, Ur, Zr);
    get_shear(x2, &fh, &df, Ur, Zr);
    if ((fl > 0.0 && fh > 0.0) || (fl < 0.0 && fh < 0.0)) {
        log_err("Root must be bracketed");
    }
    if (fl == 0.0) {
        return x1;
    }
    if (fh == 0.0) {
        return x2;
    }
    /* Orient the search so that f(xl) < 0 < f(xh). */
    if (fl < 0.0) {
        xl = x1;
        xh = x2;
    }
    else {
        xh = x1;
        xl = x2;
    }
    rts = 0.5 * (x1 + x2);
    dxold = fabs(x2 - x1);
    dx = dxold;
    get_shear(rts, &f, &df, Ur, Zr);
    for (j = 1; j <= param.BLOWING_MAX_ITER; j++) {
        /* Bisect when Newton would exit [xl, xh] or is shrinking too
           slowly; otherwise take the Newton step. */
        if ((((rts - xh) * df - f) * ((rts - xl) * df - f) > 0.0) ||
            (fabs(2.0 * f) > fabs(dxold * df))) {
            dxold = dx;
            dx = 0.5 * (xh - xl);
            rts = xl + dx;
            if (xl == rts) {
                return rts;
            }
        }
        else {
            dxold = dx;
            dx = f / df;
            temp = rts;
            rts -= dx;
            if (temp == rts) {
                return rts;
            }
        }
        if (fabs(dx) < acc) {
            return rts;
        }
        // if(rts < .025) rts=.025;
        get_shear(rts, &f, &df, Ur, Zr);
        /* Maintain the bracket around the root. */
        if (f < 0.0) {
            xl = rts;
        }
        else {
            xh = rts;
        }
    }
    /* log_err aborts; control does not return from here. */
    log_err("Maximum number of iterations exceeded");
}
/******************************************************************************
* @brief This routine resets the values of all output variables to 0.
*****************************************************************************/
/* Residual (*f) and derivative (*df) of the saltation shear-velocity
   relation solved by rtnewt/shear_stress: a log wind profile at height Zr
   with roughness length 0.12*x^2/(2g) (the Zo_salt form used in
   shear_stress), where x is the candidate shear velocity, Ur the wind
   speed at Zr, and CONST_KARMAN von Karman's constant. */
void
get_shear(double x,
double *f,
double *df,
double Ur,
double Zr)
{
*f =
log(2. * CONST_G * Zr / .12) + log(1 / (x * x)) - CONST_KARMAN * Ur / x;
/* d/dx of the expression above: log(1/x^2) -> -2/x, -K*Ur/x -> +K*Ur/x^2. */
*df = CONST_KARMAN * Ur / (x * x) - 2. / x;
}
/******************************************************************************
* @brief Calculate the sublimation rate for a given height above the
* boundary layer.
*****************************************************************************/
double
sub_with_height(double z,
double es,
double Wind,
double AirDens,
double ZO,
double EactAir,
double F,
double hsalt,
double phi_r,
double ushear,
double Zrh)
{
/* Sublimation rate of suspended blowing snow at height z above the
   saltation layer: the per-particle mass-loss coefficient psi_t (1/s)
   times the suspended-snow concentration phi_t at z.  Integrated over z
   by qromb.  Citations below are from the original authors. */
extern parameters_struct param;
/* Local variables */
double Rrz, ALPHAz, Mz;
double Rmean, terminal_v, fluctuat_v;
double Vtz, Re, Nu;
double sigz, dMdt;
double temp;
double psi_t, phi_t;
// Calculate sublimation loss rate (1/s)
/* Height-dependent particle radius and gamma-distribution shape, then
   the mean particle mass Mz and mean radius Rmean. */
Rrz = 4.6e-5 * pow(z, -.258);
ALPHAz = 4.08 + 12.6 * z;
Mz =
(4. /
3.) * CONST_PI * CONST_RHOICE * Rrz * Rrz * Rrz *
(1. + (3. / ALPHAz) + (2. / (ALPHAz * ALPHAz)));
Rmean = pow((3. * Mz) / (4. * CONST_PI * CONST_RHOICE), 1. / 3.);
// Pomeroy and Male 1986
terminal_v = 1.1e7 * pow(Rmean, 1.8);
// Pomeroy (1988)
fluctuat_v = 0.005 * pow(Wind, 1.36);
// Ventilation velocity for turbulent suspension Lee (1975)
Vtz = terminal_v + 3. * fluctuat_v * cos(CONST_PI / 4.);
/* Particle Reynolds and Nusselt numbers for the ventilation flow. */
Re = 2. * Rmean * Vtz / param.BLOWING_KIN_VIS;
Nu = 1.79 + 0.606 * pow(Re, 0.5);
// LCB: found error in rh calc, 1/20/04, check impact
/* Water-vapor undersaturation at height z (negative when EactAir < es). */
sigz = ((EactAir / es) - 1.) * (1.019 + .027 * log(z));
dMdt = 2 * CONST_PI * Rmean * sigz * Nu / F;
// sublimation loss rate coefficient (1/s)
psi_t = dMdt / Mz;
// Concentration of turbulent suspended snow Kind (1992)
temp = (0.5 * ushear * ushear) / (Wind * param.BLOWING_SETTLING);
phi_t = phi_r *
((temp +
1.) *
pow((z / hsalt),
(-1. *
param.BLOWING_SETTLING) / (CONST_KARMAN * ushear)) - temp);
return psi_t * phi_t;
}
/******************************************************************************
* @brief Calculate parameters for probability of blowing snow occurence.
*
* @note see Li and Pomeroy 1997
*****************************************************************************/
/* Probability of blowing-snow occurrence after Li and Pomeroy (1997): a
   logistic function of the 10 m wind U10 whose mean/spread depend on air
   temperature and, for dry snow, on snow age.  Returns 1.0 when the
   probability calculation is disabled in the run options.
   Refactored: the identical logistic expression and the [0, 1] clamping,
   previously duplicated in the dry- and wet-snow branches, are now written
   once; behavior is unchanged. */
double
get_prob(double Tair,
         double Age,
         double SurfaceLiquidWater,
         double U10)
{
    extern option_struct options;

    double mean_u_occurence;
    double sigma_occurence;
    double prob_occurence;

    if (options.BLOWING_CALC_PROB) {
        if (SurfaceLiquidWater < 0.001) {
            /* Dry snow: occurrence threshold rises with age and warmth. */
            mean_u_occurence = 11.2 + 0.365 * Tair + 0.00706 * Tair * Tair +
                               0.9 * log(Age);
            sigma_occurence = 4.3 + 0.145 * Tair + 0.00196 * Tair * Tair;
        }
        else {
            /* Wet snow: fixed, much higher threshold. */
            mean_u_occurence = 21.;
            sigma_occurence = 7.;
        }
        prob_occurence = 1. /
                         (1. +
                          exp(sqrt(CONST_PI) *
                              (mean_u_occurence - U10) / sigma_occurence));
        /* Clamp to a valid probability. */
        if (prob_occurence < 0.0) {
            prob_occurence = 0.0;
        }
        if (prob_occurence > 1.0) {
            prob_occurence = 1.0;
        }
    }
    else {
        prob_occurence = 1.;
    }
    return prob_occurence;
}
/******************************************************************************
* @brief Calculate threshold shear stress.
*****************************************************************************/
/* Threshold shear velocity for saltation.  The 10 m threshold wind ut10
   follows Li and Pomeroy (1997), with a wet-snow constant when surface
   liquid water is present; it is converted to a shear velocity through the
   log profile when the variable-threshold option is on, otherwise the
   constant BLOWING_UTHRESH (Liston and Sturm style) is used. */
double
get_thresh(double Tair,
           double SurfaceLiquidWater,
           double Zo_salt)
{
    extern parameters_struct param;
    extern option_struct options;

    double ut10;

    if (SurfaceLiquidWater < 0.001) {
        /* Dry snow: temperature-dependent threshold. */
        ut10 = 9.43 + .18 * Tair + .0033 * Tair * Tair;
    }
    else {
        /* Wet snow: fixed threshold. */
        ut10 = 9.9;
    }

    if (options.BLOWING_VAR_THRESHOLD) {
        /* Variable threshold, Li and Pomeroy 1997. */
        return CONST_KARMAN * ut10 / log(10. / Zo_salt);
    }
    /* Constant threshold, i.e. Liston and Sturm. */
    return param.BLOWING_UTHRESH;
}
/******************************************************************************
* @brief Iterate to find actual shear stress during saltation.
*****************************************************************************/
/**
 * @brief Solve for the actual shear velocity (and saltation roughness
 *        length) during saltation by bracketing + Newton iteration.
 *
 * @param U10      10 m wind speed (m/s)
 * @param ZO       background roughness length (m)
 * @param ushear   [out] shear velocity (m/s)
 * @param Zo_salt  [out] roughness length during saltation (m)
 * @param utshear  threshold shear velocity (m/s)
 */
void
shear_stress(double U10,
             double ZO,
             double *ushear,
             double *Zo_salt,
             double utshear)
{
    double lower, upper, tol;
    double f_lower, f_upper, deriv;

    /* Bracket the solution between the threshold shear velocity and the
       value implied by a neutral log profile at 10 m. */
    lower = utshear;
    upper = CONST_KARMAN * U10;
    tol = 0.10 * lower;

    /* Evaluate the residual at both ends of the bracket. */
    get_shear(lower, &f_lower, &deriv, U10, 10.);
    get_shear(upper, &f_upper, &deriv, U10, 10.);

    if (f_lower < 0.0 && f_upper < 0.0) {
        log_err("Solution surpasses upper boundary."
                "fl(%f)=%f, fh(%f)=%f", lower, f_lower, upper, f_upper);
    }

    if (f_lower > 0.0 && f_upper > 0.0) {
        /* No sign change inside the bracket: fall back on the supplied
           roughness length and the plain log-profile shear velocity. */
        *Zo_salt = ZO;
        *ushear = CONST_KARMAN * U10 / log(10. / ZO);
    }
    else {
        /* Root is bracketed: refine with Newton iteration, then derive
           the saltation roughness length from the shear velocity. */
        *ushear = rtnewt(lower, upper, tol, U10, 10.);
        *Zo_salt = 0.12 * (*ushear) * (*ushear) / (2. * CONST_G);
    }
}
/******************************************************************************
* @brief Calculate the sublimation flux.
*****************************************************************************/
/* Blowing-snow sublimation flux (kg/m2*s). Either a simple empirical model
   (options.BLOWING_SIMPLE) or the sum of saltation- and suspension-layer
   contributions; also returns the downwind transport rate via *Transport. */
double
CalcSubFlux(double EactAir,
double es,
double Zrh,
double AirDens,
double utshear,
double ushear,
double fe,
double Tsnow,
double Tair,
double U10,
double Zo_salt,
double F,
double *Transport)
{
extern parameters_struct param;
extern option_struct options;
/* NOTE(review): parameters Tsnow and Tair are currently unused here. */
double b, undersat_2;
double SubFlux;
double Qsalt, hsalt;
double phi_s, psi_s;
double T, ztop;
double particle;
double saltation_transport;
double suspension_transport;
SubFlux = 0.0;
/* Saltating particle velocity scale, proportional to the threshold
   shear velocity. */
particle = utshear * 2.8;
// SBSM:
if (options.BLOWING_SIMPLE) {
/* Simple model: flux scales with air undersaturation and U10^5. */
b = .25;
if (EactAir >= es) {
/* Saturated (or supersaturated) air: no sublimation. */
undersat_2 = 0.0;
}
else {
undersat_2 =
((EactAir / es) - 1.) * (1. - .027 * log(Zrh) + 0.027 * log(2));
}
SubFlux = b * undersat_2 * pow(U10, 5.) / F;
}
else {
// Sublimation flux (kg/m2*s) = mass-concentration * sublimation rate * height
// for both the saltation layer and the suspension layer
// Saltation layer is assumed constant with height
// Maximum saltation transport rate (kg/m*s)
// Liston and Sturm 1998, eq. 6
Qsalt = (param.BLOWING_CSALT * AirDens / CONST_G) *
(utshear / ushear) * (ushear * ushear - utshear * utshear);
if (options.BLOWING_FETCH) {
/* Reduce the saltation transport for a finite fetch length fe. */
Qsalt *= (1. + (500. / (3. * fe)) * (exp(-3. * fe / 500.) - 1.));
}
// Pomeroy and Male (1992)
hsalt = 0.08436 * pow(ushear, 1.27);
// Saltation layer mass concentration (kg/m3)
phi_s = Qsalt / (hsalt * particle);
/* Top of the suspension layer used as the upper integration limit. */
T = 0.5 * (ushear * ushear) / (U10 * param.BLOWING_SETTLING);
ztop = hsalt *
pow(T / (T + 1.),
(CONST_KARMAN * ushear) / (-1. * param.BLOWING_SETTLING));
if (EactAir >= es) {
/* Saturated air: no sublimation from either layer. */
SubFlux = 0.0;
}
else {
// Sublimation loss-rate for the saltation layer (1/s)
psi_s = sub_with_height(hsalt / 2., es, U10, AirDens, Zo_salt,
EactAir, F, hsalt,
phi_s, ushear, Zrh);
// Sublimation from the saltation layer in kg/m2*s
SubFlux = phi_s * psi_s * hsalt;
// Suspension layer must be integrated
SubFlux += qromb(sub_with_height, es, U10, AirDens, Zo_salt,
EactAir, F, hsalt,
phi_s, ushear, Zrh, hsalt, ztop);
}
// Transport out of the domain by saltation Qs(fe) (kg/m*s), eq 10 Liston and Sturm
saltation_transport = Qsalt * (1 - exp(-3. * fe / 500.));
// Transport in the suspension layer
suspension_transport = qromb(transport_with_height, es, U10, AirDens,
Zo_salt,
EactAir, F, hsalt, phi_s, ushear, Zrh,
hsalt, ztop);
// Transport at the downstream edge of the fetch in kg/m*s
*Transport = (suspension_transport + saltation_transport);
if (options.BLOWING_FETCH) {
/* Normalize by the fetch length when fetch effects are enabled. */
*Transport /= fe;
}
}
return SubFlux;
}
/******************************************************************************
* @brief Calculate the transport rate for a given height above the boundary
* layer.
*****************************************************************************/
/**
 * @brief Snow transport rate at height z above the boundary layer:
 *        wind speed at z times the suspended-snow mass concentration
 *        at z (concentration profile after Kind 1992).
 */
double
transport_with_height(double z,
                      double es,
                      double Wind,
                      double AirDens,
                      double ZO,
                      double EactAir,
                      double F,
                      double hsalt,
                      double phi_r,
                      double ushear,
                      double Zrh)
{
    extern parameters_struct param;

    double wind_at_z;
    double sigma_ratio;
    double profile_exp;
    double conc_at_z;

    /* Wind speed at height z from the logarithmic wind profile. */
    wind_at_z = ushear * log(z / ZO) / CONST_KARMAN;

    /* Concentration of turbulent suspended snow, Kind (1992). */
    sigma_ratio = (0.5 * ushear * ushear) / (Wind * param.BLOWING_SETTLING);
    profile_exp = (-1. * param.BLOWING_SETTLING) / (CONST_KARMAN * ushear);
    conc_at_z = phi_r *
                ((sigma_ratio + 1.) * pow(z / hsalt, profile_exp) -
                 sigma_ratio);

    return wind_at_z * conc_at_z;
}
|
alifold.c | /* Last changed Time-stamp: <2009-02-24 15:17:17 ivo> */
/*
minimum free energy folding
for a set of aligned sequences
c Ivo Hofacker
Vienna RNA package
*/
/**
*** \file alifold.c
**/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include <limits.h>
#include "fold.h"
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "pair_mat.h"
#include "params.h"
#include "ribo.h"
#include "aln_util.h"
#include "loop_energies.h"
#include "gquad.h"
#include "alifold.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define PAREN
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define NEW_NINIO 1 /* new asymetry penalty */
#define MAXSECTORS 500 /* dimension for a backtrack array */
#define LOCALITY 0. /* locality parameter for base-pairs */
#define UNIT 100
#define MINPSCORE -2 * UNIT
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/* Weighting factors for the covariance bonus and the penalty for
   non-compatible sequences, applied when computing pair scores in
   make_pscores(). */
PUBLIC double cv_fact=1.; /* should be made static to not interfere with other threads */
PUBLIC double nc_fact=1.; /* should be made static to not interfere with other threads */
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE short **S = NULL; /* numerical encoding of each aligned sequence (see encode_ali_sequence) */
PRIVATE short **S5 = NULL; /*S5[s][i] holds next base 5' of i in sequence s*/
PRIVATE short **S3 = NULL; /*Sl[s][i] holds next base 3' of i in sequence s*/
PRIVATE char **Ss = NULL; /* gap-free sequence strings, one per aligned sequence */
PRIVATE unsigned short **a2s = NULL; /* maps alignment columns to ungapped sequence positions */
PRIVATE paramT *P = NULL; /* scaled energy parameters, set in update_alifold_params() */
PRIVATE int *indx = NULL; /* index for moving in the triangle matrices c[] and fMl[]*/
PRIVATE int *c = NULL; /* energy array, given that i-j pair */
PRIVATE int *cc = NULL; /* linear array for calculating canonical structures */
PRIVATE int *cc1 = NULL; /* " " */
PRIVATE int *f5 = NULL; /* energy of 5' end */
PRIVATE int *fML = NULL; /* multi-loop auxiliary energy array */
PRIVATE int *Fmi = NULL; /* holds row i of fML (avoids jumps in memory) */
PRIVATE int *DMLi = NULL; /* DMLi[j] holds MIN(fML[i,k]+fML[k+1,j]) */
PRIVATE int *DMLi1 = NULL; /* MIN(fML[i+1,k]+fML[k+1,j]) */
PRIVATE int *DMLi2 = NULL; /* MIN(fML[i+2,k]+fML[k+1,j]) */
PRIVATE int *pscore = NULL; /* precomputed array of pair types */
PRIVATE int init_length = -1; /* alignment length the arrays are currently sized for */
PRIVATE sect sector[MAXSECTORS]; /* stack of partial structures for backtracking */
PRIVATE bondT *base_pair2 = NULL; /* base pairs of the predicted structure */
PRIVATE int circular = 0; /* reset to 0 at the start of alifold() */
PRIVATE int with_gquad = 0; /* copied from P->model_details.gquad in alifold() */
PRIVATE int *ggg = NULL; /* minimum free energies of the gquadruplexes */
PRIVATE char *cons_seq = NULL; /* consensus sequence (built in fill_arrays when with_gquad) */
PRIVATE short *S_cons = NULL; /* numerical encoding of the consensus sequence */
#ifdef _OPENMP
/* All of the folding state above is per-thread under OpenMP. */
#pragma omp threadprivate(S, S5, S3, Ss, a2s, P, indx, c, cc, cc1, f5, fML, Fmi, DMLi, DMLi1, DMLi2,\
pscore, init_length, sector, base_pair2,\
ggg, with_gquad, cons_seq, S_cons)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void init_alifold(int length);
PRIVATE void get_arrays(unsigned int size);
PRIVATE void make_pscores(const short *const *S, const char **AS, int n_seq, const char *structure);
PRIVATE int fill_arrays(const char **strings);
PRIVATE void backtrack(const char **strings, int s);
PRIVATE void energy_of_alistruct_pt(const char **sequences,short * ptable, int n_seq, int *energy);
PRIVATE void stack_energy_pt(int i, const char **sequences, short *ptable, int n_seq, int *energy);
PRIVATE int ML_Energy_pt(int i, int n_seq, short *pt);
PRIVATE int EL_Energy_pt(int i, int n_seq, short *pt);
PRIVATE void en_corr_of_loop_gquad(int i,
int j,
const char **sequences,
const char *structure,
short *pt,
int *loop_idx,
int n_seq,
int en[2]);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/* unsafe function that will be replaced by a threadsafe companion in the future */
/* Allocate and initialize all folding arrays for the given alignment
   length. Unsafe (relies on file-level state); a threadsafe companion
   is planned upstream. */
PRIVATE void init_alifold(int length){
#ifdef _OPENMP
  /* keep the thread team size fixed */
  omp_set_dynamic(0);
#endif

  if (length < 1)
    nrerror("initialize_fold: argument must be greater 0");

  /* discard arrays from any previous run, then allocate fresh ones */
  free_alifold_arrays();
  get_arrays((unsigned) length);

  indx        = get_indx((unsigned)length);
  init_length = length;
  update_alifold_params();
}
/* Allocate the DP matrices and helper rows for a sequence of the given
   length; aborts if the triangular matrix size would overflow int. */
PRIVATE void get_arrays(unsigned int size){
  unsigned int tri;

  if(size >= (unsigned int)sqrt((double)INT_MAX))
    nrerror("get_arrays@alifold.c: sequence length exceeds addressable range");

  /* entries in a triangular matrix over [1..size], plus slack */
  tri = (size*(size+1))/2+2;

  c      = (int *) space(sizeof(int)*tri);
  fML    = (int *) space(sizeof(int)*tri);
  pscore = (int *) space(sizeof(int)*tri);

  f5  = (int *) space(sizeof(int)*(size+2));
  cc  = (int *) space(sizeof(int)*(size+2));
  cc1 = (int *) space(sizeof(int)*(size+2));

  Fmi   = (int *) space(sizeof(int)*(size+1));
  DMLi  = (int *) space(sizeof(int)*(size+1));
  DMLi1 = (int *) space(sizeof(int)*(size+1));
  DMLi2 = (int *) space(sizeof(int)*(size+1));

  if(base_pair2) free(base_pair2);
  base_pair2 = (bondT *) space(sizeof(bondT)*(1+size/2));
}
/* Release every array owned by this module and reset the pointers.
   free(NULL) is a no-op in standard C, so no per-pointer guards are
   needed. The deprecated global base_pair only aliases base_pair2 and
   is just reset, not freed separately. */
PUBLIC void free_alifold_arrays(void){
  free(indx);
  free(c);
  free(fML);
  free(f5);
  free(cc);
  free(cc1);
  free(pscore);
  free(base_pair2);
  free(Fmi);
  free(DMLi);
  free(DMLi1);
  free(DMLi2);
  free(P);
  free(ggg);
  free(cons_seq);
  free(S_cons);

  indx = c = fML = f5 = cc = cc1 = Fmi = DMLi = DMLi1 = DMLi2 = ggg = NULL;
  pscore      = NULL;
  base_pair   = NULL;
  base_pair2  = NULL;
  P           = NULL;
  cons_seq    = NULL;
  S_cons      = NULL;
  init_length = 0;
}
/**
 * Allocate and fill the per-sequence encoding arrays for an alignment.
 *
 * For each of the sequences in the NULL-terminated list, S receives the
 * numerical encoding, S5/S3 the 5'/3' neighbor encodings, a2s the
 * alignment-column to ungapped-position map, and Ss the gap-free
 * sequence characters (all filled by encode_ali_sequence()).  Each
 * output array is NULL-terminated at index n_seq.
 *
 * Aborts via nrerror() on an empty alignment or when the sequences do
 * not all share the same length.
 */
PUBLIC void alloc_sequence_arrays(const char **sequences, short ***S, short ***S5, short ***S3, unsigned short ***a2s, char ***Ss, int circ){
  unsigned int s, n_seq, length;
  if(sequences[0] != NULL){
    length = strlen(sequences[0]);
    /* count sequences in the NULL-terminated list */
    for (s=0; sequences[s] != NULL; s++);
    n_seq = s;
    *S   = (short **)          space((n_seq+1) * sizeof(short *));
    *S5  = (short **)          space((n_seq+1) * sizeof(short *));
    *S3  = (short **)          space((n_seq+1) * sizeof(short *));
    *a2s = (unsigned short **) space((n_seq+1) * sizeof(unsigned short *));
    *Ss  = (char **)           space((n_seq+1) * sizeof(char *));
    for (s=0; s<n_seq; s++) {
      /* fixed typo in the original error message ("uneqal seqence") */
      if(strlen(sequences[s]) != length) nrerror("unequal sequence lengths");
      (*S5)[s]  = (short *)          space((length + 2) * sizeof(short));
      (*S3)[s]  = (short *)          space((length + 2) * sizeof(short));
      (*a2s)[s] = (unsigned short *) space((length + 2) * sizeof(unsigned short));
      (*Ss)[s]  = (char *)           space((length + 2) * sizeof(char));
      (*S)[s]   = (short *)          space((length + 2) * sizeof(short));
      encode_ali_sequence(sequences[s], (*S)[s], (*S5)[s], (*S3)[s], (*Ss)[s], (*a2s)[s], circ);
    }
    /* NULL-terminate all arrays so callers can iterate without n_seq */
    (*S5)[n_seq]  = NULL;
    (*S3)[n_seq]  = NULL;
    (*a2s)[n_seq] = NULL;
    (*Ss)[n_seq]  = NULL;
    (*S)[n_seq]   = NULL;
  }
  else nrerror("alloc_sequence_arrays: no sequences in the alignment!");
}
/* Release everything allocated by alloc_sequence_arrays() and reset the
   callers' handles to NULL. */
PUBLIC void free_sequence_arrays(unsigned int n_seq, short ***S, short ***S5, short ***S3, unsigned short ***a2s, char ***Ss){
  unsigned int i;

  /* per-sequence arrays first ... */
  for (i = 0; i < n_seq; i++) {
    free((*S)[i]);
    free((*S5)[i]);
    free((*S3)[i]);
    free((*a2s)[i]);
    free((*Ss)[i]);
  }

  /* ... then the pointer tables themselves */
  free(*S);   *S   = NULL;
  free(*S5);  *S5  = NULL;
  free(*S3);  *S3  = NULL;
  free(*a2s); *a2s = NULL;
  free(*Ss);  *Ss  = NULL;
}
/* Rebuild the scaled energy parameters and the base-pair matrix
   (e.g. after a temperature change). */
PUBLIC void update_alifold_params(void){
  free(P); /* free(NULL) is a no-op */
  P = scale_parameters();
  make_pair_matrix();
  if (init_length < 0)
    init_length = 0;
}
/**
 * Compute the consensus MFE structure for a NULL-terminated array of
 * aligned sequences. The structure string is written into `structure`
 * (it may carry constraints on input, passed through to make_pscores()).
 * Returns the free energy divided by (n_seq*100), i.e. averaged over
 * sequences and converted from the internal integer units.
 */
PUBLIC float alifold(const char **strings, char *structure){
int length, energy, s, n_seq;
circular = 0;
length = (int) strlen(strings[0]);
#ifdef _OPENMP
/* always init everything since all global static variables are uninitialized when entering a thread */
init_alifold(length);
#else
if (length>init_length) init_alifold(length);
#endif
/* re-scale parameters if the global temperature changed */
if (fabs(P->temperature - temperature)>1e-6) update_alifold_params();
/* count the sequences in the NULL-terminated list */
for (s=0; strings[s]!=NULL; s++);
n_seq = s;
with_gquad = P->model_details.gquad;
if(with_gquad){ /* add a guess of how many G's may be involved in a G quadruplex */
if(base_pair2)
free(base_pair2);
base_pair2 = (bondT *) space(sizeof(bondT)*(4*(1+length/2)));
}
alloc_sequence_arrays(strings, &S, &S5, &S3, &a2s, &Ss, circular);
make_pscores((const short **) S, strings, n_seq, structure);
/* forward recursion, then traceback of the optimal structure */
/* NOTE(review): `energy` is assigned but not used afterwards; the
   return value is read directly from c/fML/f5 below. */
energy = fill_arrays((const char **)strings);
backtrack((const char **)strings, 0);
#ifdef PAREN
parenthesis_structure(structure, base_pair2, length);
#else
letter_structure(structure, base_pair2, length);
#endif
/*
* Backward compatibility:
* This block may be removed if deprecated functions
* relying on the global variable "base_pair" vanishs from within the package!
*/
base_pair = base_pair2;
/*
{
if(base_pair) free(base_pair);
base_pair = (bondT *)space(sizeof(bondT) * (1+length/2));
memcpy(base_pair, base_pair2, sizeof(bondT) * (1+length/2));
}
*/
free_sequence_arrays(n_seq, &S, &S5, &S3, &a2s, &Ss);
/* pick the energy matching the requested backtrack mode */
if (backtrack_type=='C')
return (float) c[indx[length]+1]/(n_seq*100.);
else if (backtrack_type=='M')
return (float) fML[indx[length]+1]/(n_seq*100.);
else
return (float) f5[length]/(n_seq*100.);
}
/**
*** the actual forward recursion to fill the energy arrays
**/
/* Forward dynamic-programming recursion: fills the c (pair), fML
   (multiloop) and f5 (external) energy arrays for the alignment and
   returns f5[length]. Energies are summed over all sequences; the
   covariance score pscore[] is subtracted as a bonus/penalty. */
PRIVATE int fill_arrays(const char **strings) {
int i, j, k, p, q, length, energy, new_c;
int decomp, MLenergy, new_fML;
int s, n_seq, *type, type_2, tt;
/* count number of sequences */
for (n_seq=0; strings[n_seq]!=NULL; n_seq++);
type = (int *) space(n_seq*sizeof(int));
length = strlen(strings[0]);
/* init energies */
if(with_gquad){
cons_seq = consensus(strings);
/* make g-island annotation of the consensus */
S_cons = encode_sequence(cons_seq, 0);
ggg = get_gquad_ali_matrix(S_cons, S, n_seq, P);
}
for (j=1; j<=length; j++){
Fmi[j]=DMLi[j]=DMLi1[j]=DMLi2[j]=INF;
for (i=(j>TURN?(j-TURN):1); i<j; i++) {
c[indx[j]+i] = fML[indx[j]+i] = INF;
}
}
/* begin recursions */
for (i = length-TURN-1; i >= 1; i--) { /* i,j in [1..length] */
for (j = i+TURN+1; j <= length; j++) {
/* NOTE(review): `up` appears unused in this scope. */
int ij, psc, l1, maxq, minq, up, c0;
ij = indx[j]+i;
/* pair type per sequence; non-canonical pairs get pseudo-type 7 */
for (s=0; s<n_seq; s++) {
type[s] = pair[S[s][i]][S[s][j]];
if (type[s]==0) type[s]=7;
}
psc = pscore[indx[j]+i];
if (psc>=MINPSCORE) { /* a pair to consider */
int stackEnergy = INF;
/* hairpin ----------------------------------------------*/
for (new_c=s=0; s<n_seq; s++) {
if ((a2s[s][j-1]-a2s[s][i])<3) new_c+=600;
else new_c += E_Hairpin(a2s[s][j-1]-a2s[s][i],type[s],S3[s][i],S5[s][j],Ss[s]+(a2s[s][i-1]), P);
}
/*--------------------------------------------------------
check for elementary structures involving more than one
closing pair.
--------------------------------------------------------*/
for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1) ; p++) {
minq = j-i+p-MAXLOOP-2;
if (minq<p+1+TURN) minq = p+1+TURN;
for (q = minq; q < j; q++) {
if (pscore[indx[q]+p]<MINPSCORE) continue;
for (energy = s=0; s<n_seq; s++) {
type_2 = pair[S[s][q]][S[s][p]]; /* q,p not p,q! */
if (type_2 == 0) type_2 = 7;
energy += E_IntLoop(a2s[s][p-1]-a2s[s][i], a2s[s][j-1]-a2s[s][q], type[s], type_2,
S3[s][i], S5[s][j],
S5[s][p], S3[s][q], P);
}
new_c = MIN2(new_c, energy + c[indx[q]+p]);
if ((p==i+1)&&(j==q+1)) stackEnergy = energy; /* remember stack energy */
} /* end q-loop */
} /* end p-loop */
/* multi-loop decomposition ------------------------*/
decomp = DMLi1[j-1];
if(dangles){
for(s=0; s<n_seq; s++){
tt = rtype[type[s]];
decomp += E_MLstem(tt, S5[s][j], S3[s][i], P);
}
}
else{
for(s=0; s<n_seq; s++){
tt = rtype[type[s]];
decomp += E_MLstem(tt, -1, -1, P);
}
}
MLenergy = decomp + n_seq*P->MLclosing;
new_c = MIN2(new_c, MLenergy);
/* (i,j) enclosing a g-quadruplex like an interior loop */
if(with_gquad){
decomp = 0;
for(s=0;s<n_seq;s++){
tt = type[s];
if(dangles == 2)
decomp += P->mismatchI[tt][S3[s][i]][S5[s][j]];
if(tt > 2)
decomp += P->TerminalAU;
}
/* general case: gquad with unpaired bases on both sides */
for(p = i + 2; p < j - VRNA_GQUAD_MIN_BOX_SIZE; p++){
l1 = p - i - 1;
if(l1>MAXLOOP) break;
if(S_cons[p] != 3) continue;
minq = j - i + p - MAXLOOP - 2;
c0 = p + VRNA_GQUAD_MIN_BOX_SIZE - 1;
minq = MAX2(c0, minq);
c0 = j - 1;
maxq = p + VRNA_GQUAD_MAX_BOX_SIZE + 1;
maxq = MIN2(c0, maxq);
for(q = minq; q < maxq; q++){
if(S_cons[q] != 3) continue;
c0 = decomp + ggg[indx[q] + p] + n_seq * P->internal_loop[l1 + j - q - 1];
new_c = MIN2(new_c, c0);
}
}
/* gquad starting directly at i+1 */
p = i + 1;
if(S_cons[p] == 3){
if(p < j - VRNA_GQUAD_MIN_BOX_SIZE){
minq = j - i + p - MAXLOOP - 2;
c0 = p + VRNA_GQUAD_MIN_BOX_SIZE - 1;
minq = MAX2(c0, minq);
c0 = j - 3;
maxq = p + VRNA_GQUAD_MAX_BOX_SIZE + 1;
maxq = MIN2(c0, maxq);
for(q = minq; q < maxq; q++){
if(S_cons[q] != 3) continue;
c0 = decomp + ggg[indx[q] + p] + n_seq * P->internal_loop[j - q - 1];
new_c = MIN2(new_c, c0);
}
}
}
/* gquad ending directly at j-1 */
q = j - 1;
if(S_cons[q] == 3)
for(p = i + 4; p < j - VRNA_GQUAD_MIN_BOX_SIZE; p++){
l1 = p - i - 1;
if(l1>MAXLOOP) break;
if(S_cons[p] != 3) continue;
c0 = decomp + ggg[indx[q] + p] + n_seq * P->internal_loop[l1];
new_c = MIN2(new_c, c0);
}
}
new_c = MIN2(new_c, cc1[j-1]+stackEnergy);
cc[j] = new_c - psc; /* add covariance bonus/penalty */
if (noLonelyPairs)
/* lonely-pair avoidance: only allow (i,j) on top of a stack */
c[ij] = cc1[j-1]+stackEnergy-psc;
else
c[ij] = cc[j];
} /* end >> if (pair) << */
else c[ij] = INF;
/* done with c[i,j], now compute fML[i,j] */
/* free ends ? -----------------------------------------*/
new_fML = fML[ij+1]+n_seq*P->MLbase;
new_fML = MIN2(fML[indx[j-1]+i]+n_seq*P->MLbase, new_fML);
energy = c[ij];
if(dangles){
for (s=0; s<n_seq; s++) {
energy += E_MLstem(type[s], S5[s][i], S3[s][j], P);
}
}
else{
for (s=0; s<n_seq; s++) {
energy += E_MLstem(type[s], -1, -1, P);
}
}
new_fML = MIN2(energy, new_fML);
if(with_gquad){
decomp = ggg[indx[j] + i] + n_seq * E_MLstem(0, -1, -1, P);
new_fML = MIN2(new_fML, decomp);
}
/* modular decomposition -------------------------------*/
for (decomp = INF, k = i+1+TURN; k <= j-2-TURN; k++)
decomp = MIN2(decomp, Fmi[k]+fML[indx[j]+k+1]);
DMLi[j] = decomp; /* store for use in ML decompositon */
new_fML = MIN2(new_fML,decomp);
/* coaxial stacking deleted */
fML[ij] = Fmi[j] = new_fML; /* substring energy */
} /* END for j */
{
int *FF; /* rotate the auxilliary arrays */
FF = DMLi2; DMLi2 = DMLi1; DMLi1 = DMLi; DMLi = FF;
FF = cc1; cc1=cc; cc=FF;
for (j=1; j<=length; j++) {cc[j]=Fmi[j]=DMLi[j]=INF; }
}
} /* END for i */
/* calculate energies of 5' and 3' fragments */
f5[TURN + 1] = 0;
switch(dangles){
/* dangles==0: no dangling-end contributions in the exterior loop */
case 0: for(j = TURN + 2; j <= length; j++){
f5[j] = f5[j-1];
if (c[indx[j]+1]<INF){
energy = c[indx[j]+1];
for(s = 0; s < n_seq; s++){
tt = pair[S[s][1]][S[s][j]];
if(tt==0) tt=7;
energy += E_ExtLoop(tt, -1, -1, P);
}
f5[j] = MIN2(f5[j], energy);
}
if(with_gquad){
if(ggg[indx[j]+1] < INF)
f5[j] = MIN2(f5[j], ggg[indx[j]+1]);
}
for(i = j - TURN - 1; i > 1; i--){
if(c[indx[j]+i]<INF){
energy = f5[i-1] + c[indx[j]+i];
for(s = 0; s < n_seq; s++){
tt = pair[S[s][i]][S[s][j]];
if(tt==0) tt=7;
energy += E_ExtLoop(tt, -1, -1, P);
}
f5[j] = MIN2(f5[j], energy);
}
if(with_gquad){
if(ggg[indx[j]+i] < INF)
f5[j] = MIN2(f5[j], f5[i-1] + ggg[indx[j]+i]);
}
}
}
break;
/* default: include 5'/3' neighboring bases as dangling ends */
default: for(j = TURN + 2; j <= length; j++){
f5[j] = f5[j-1];
if (c[indx[j]+1]<INF) {
energy = c[indx[j]+1];
for(s = 0; s < n_seq; s++){
tt = pair[S[s][1]][S[s][j]];
if(tt==0) tt=7;
energy += E_ExtLoop(tt, -1, (j<length) ? S3[s][j] : -1, P);
}
f5[j] = MIN2(f5[j], energy);
}
if(with_gquad){
if(ggg[indx[j]+1] < INF)
f5[j] = MIN2(f5[j], ggg[indx[j]+1]);
}
for(i = j - TURN - 1; i > 1; i--){
if (c[indx[j]+i]<INF) {
energy = f5[i-1] + c[indx[j]+i];
for(s = 0; s < n_seq; s++){
tt = pair[S[s][i]][S[s][j]];
if(tt==0) tt=7;
energy += E_ExtLoop(tt, S5[s][i], (j < length) ? S3[s][j] : -1, P);
}
f5[j] = MIN2(f5[j], energy);
}
if(with_gquad){
if(ggg[indx[j]+i] < INF)
f5[j] = MIN2(f5[j], f5[i-1] + ggg[indx[j]+i]);
}
}
}
break;
}
free(type);
return(f5[length]);
}
#include "alicircfold.inc"
/**
*** backtrack in the energy matrices to obtain a structure with MFE
**/
/* Trace back through c, f5 and fML to recover the MFE base-pair list
   into base_pair2. Heavy use of goto labels (repeat1, repeat_gquad)
   mirrors the decomposition cases of fill_arrays(). */
PRIVATE void backtrack(const char **strings, int s) {
/*------------------------------------------------------------------
trace back through the "c", "f5" and "fML" arrays to get the
base pairing list. No search for equivalent structures is done.
This inverts the folding procedure, hence it's very fast.
------------------------------------------------------------------*/
/* normally s=0.
If s>0 then s items have been already pushed onto the sector stack */
/* NOTE(review): `up`, `d3` and `d5` appear unused in this function. */
int i, j, k, p, q, length, energy, up, c0, l1, minq, maxq;
int type_2, tt, mm;
int b=0, cov_en = 0;
int n_seq;
int *type;
length = strlen(strings[0]);
for (n_seq=0; strings[n_seq]!=NULL; n_seq++);
type = (int *) space(n_seq*sizeof(int));
if (s==0) {
/* seed the sector stack with the full sequence interval */
sector[++s].i = 1;
sector[s].j = length;
sector[s].ml = (backtrack_type=='M') ? 1 : ((backtrack_type=='C')?2:0);
}
while (s>0) {
int ss, ml, fij, fi, cij, traced, i1, j1, d3, d5, jj=0, gq=0;
int canonical = 1; /* (i,j) closes a canonical structure */
i = sector[s].i;
j = sector[s].j;
ml = sector[s--].ml; /* ml is a flag indicating if backtracking is to
occur in the fML- (1) or in the f-array (0) */
if (ml==2) {
/* ml==2: (i,j) is forced to pair (backtrack_type=='C') */
base_pair2[++b].i = i;
base_pair2[b].j = j;
cov_en += pscore[indx[j]+i];
goto repeat1;
}
if (j < i+TURN+1) continue; /* no more pairs in this interval */
fij = (ml)? fML[indx[j]+i] : f5[j];
fi = (ml)?(fML[indx[j-1]+i]+n_seq*P->MLbase):f5[j-1];
if (fij == fi) { /* 3' end is unpaired */
sector[++s].i = i;
sector[s].j = j-1;
sector[s].ml = ml;
continue;
}
if (ml == 0) { /* backtrack in f5 */
switch(dangles){
case 0: /* j or j-1 is paired. Find pairing partner */
for (i=j-TURN-1,traced=0; i>=1; i--) {
int cc, en;
jj = i-1;
if (c[indx[j]+i]<INF) {
en = c[indx[j]+i] + f5[i-1];
for(ss = 0; ss < n_seq; ss++){
type[ss] = pair[S[ss][i]][S[ss][j]];
if (type[ss]==0) type[ss] = 7;
en += E_ExtLoop(type[ss], -1, -1, P);
}
if (fij == en) traced=j;
}
if(with_gquad){
if(fij == f5[i-1] + ggg[indx[j]+i]){
/* found the decomposition */
traced = j; jj = i - 1; gq = 1;
break;
}
}
if (traced) break;
}
break;
default: /* j or j-1 is paired. Find pairing partner */
for (i=j-TURN-1,traced=0; i>=1; i--) {
int cc, en;
jj = i-1;
if (c[indx[j]+i]<INF) {
en = c[indx[j]+i] + f5[i-1];
for(ss = 0; ss < n_seq; ss++){
type[ss] = pair[S[ss][i]][S[ss][j]];
if (type[ss]==0) type[ss] = 7;
en += E_ExtLoop(type[ss], (i>1) ? S5[ss][i]: -1, (j < length) ? S3[ss][j] : -1, P);
}
if (fij == en) traced=j;
}
if(with_gquad){
if(fij == f5[i-1] + ggg[indx[j]+i]){
/* found the decomposition */
traced = j; jj = i - 1; gq = 1;
break;
}
}
if (traced) break;
}
break;
}
if (!traced) nrerror("backtrack failed in f5");
/* push back the remaining f5 portion */
sector[++s].i = 1;
sector[s].j = jj;
sector[s].ml = ml;
/* trace back the base pair found */
j=traced;
if(with_gquad && gq){
/* goto backtrace of gquadruplex */
goto repeat_gquad;
}
base_pair2[++b].i = i;
base_pair2[b].j = j;
cov_en += pscore[indx[j]+i];
goto repeat1;
}
else { /* trace back in fML array */
if (fML[indx[j]+i+1]+n_seq*P->MLbase == fij) { /* 5' end is unpaired */
sector[++s].i = i+1;
sector[s].j = j;
sector[s].ml = ml;
continue;
}
if(with_gquad){
if(fij == ggg[indx[j]+i] + n_seq * E_MLstem(0, -1, -1, P)){
/* go to backtracing of quadruplex */
goto repeat_gquad;
}
}
/* does (i,j) itself pair as a multiloop component? */
cij = c[indx[j]+i];
if(dangles){
for(ss = 0; ss < n_seq; ss++){
tt = pair[S[ss][i]][S[ss][j]];
if(tt==0) tt=7;
cij += E_MLstem(tt, S5[ss][i], S3[ss][j], P);
}
}
else{
for(ss = 0; ss < n_seq; ss++){
tt = pair[S[ss][i]][S[ss][j]];
if(tt==0) tt=7;
cij += E_MLstem(tt, -1, -1, P);
}
}
if (fij==cij){
/* found a pair */
base_pair2[++b].i = i;
base_pair2[b].j = j;
cov_en += pscore[indx[j]+i];
goto repeat1;
}
/* otherwise split fML[i,j] into two multiloop components */
for (k = i+1+TURN; k <= j-2-TURN; k++)
if (fij == (fML[indx[k]+i]+fML[indx[j]+k+1]))
break;
sector[++s].i = i;
sector[s].j = k;
sector[s].ml = ml;
sector[++s].i = k+1;
sector[s].j = j;
sector[s].ml = ml;
if (k>j-2-TURN) nrerror("backtrack failed in fML");
continue;
}
repeat1:
/*----- begin of "repeat:" -----*/
if (canonical) cij = c[indx[j]+i];
for (ss=0; ss<n_seq; ss++) {
type[ss] = pair[S[ss][i]][S[ss][j]];
if (type[ss]==0) type[ss] = 7;
}
if (noLonelyPairs)
if (cij == c[indx[j]+i]) {
/* (i.j) closes canonical structures, thus
(i+1.j-1) must be a pair */
for (ss=0; ss<n_seq; ss++) {
type_2 = pair[S[ss][j-1]][S[ss][i+1]]; /* j,i not i,j */
if (type_2==0) type_2 = 7;
cij -= P->stack[type[ss]][type_2];
}
cij += pscore[indx[j]+i];
base_pair2[++b].i = i+1;
base_pair2[b].j = j-1;
cov_en += pscore[indx[j-1]+i+1];
i++; j--;
canonical=0;
goto repeat1;
}
canonical = 1;
cij += pscore[indx[j]+i];
/* test for a hairpin closed by (i,j) */
{int cc=0;
for (ss=0; ss<n_seq; ss++) {
if ((a2s[ss][j-1]-a2s[ss][i])<3) cc+=600;
else cc += E_Hairpin(a2s[ss][j-1]-a2s[ss][i], type[ss], S3[ss][i], S5[ss][j], Ss[ss]+a2s[ss][i-1], P);
}
if (cij == cc) /* found hairpin */
continue;
}
/* test for an interior loop (i,j);(p,q) */
for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1); p++) {
minq = j-i+p-MAXLOOP-2;
if (minq<p+1+TURN) minq = p+1+TURN;
for (q = j-1; q >= minq; q--) {
if (c[indx[q]+p]>=INF) continue;
for (ss=energy=0; ss<n_seq; ss++) {
type_2 = pair[S[ss][q]][S[ss][p]]; /* q,p not p,q */
if (type_2==0) type_2 = 7;
energy += E_IntLoop(a2s[ss][p-1]-a2s[ss][i],a2s[ss][j-1]-a2s[ss][q],
type[ss], type_2,
S3[ss][i], S5[ss][j],
S5[ss][p], S3[ss][q], P);
}
traced = (cij == energy+c[indx[q]+p]);
if (traced) {
base_pair2[++b].i = p;
base_pair2[b].j = q;
cov_en += pscore[indx[q]+p];
i = p, j = q;
goto repeat1;
}
}
}
/* end of repeat: --------------------------------------------------*/
/* (i.j) must close a multi-loop */
i1 = i+1;
j1 = j-1;
if(with_gquad){
/*
The case that is handled here actually resembles something like
an interior loop where the enclosing base pair is of regular
kind and the enclosed pair is not a canonical one but a g-quadruplex
that should then be decomposed further...
*/
mm = 0;
for(ss=0;ss<n_seq;ss++){
tt = type[ss];
if(tt == 0) tt = 7;
if(dangles == 2)
mm += P->mismatchI[tt][S3[ss][i]][S5[ss][j]];
if(tt > 2)
mm += P->TerminalAU;
}
/* general case: gquad with unpaired bases on both sides */
for(p = i + 2;
p < j - VRNA_GQUAD_MIN_BOX_SIZE;
p++){
if(S_cons[p] != 3) continue;
l1 = p - i - 1;
if(l1>MAXLOOP) break;
minq = j - i + p - MAXLOOP - 2;
c0 = p + VRNA_GQUAD_MIN_BOX_SIZE - 1;
minq = MAX2(c0, minq);
c0 = j - 1;
maxq = p + VRNA_GQUAD_MAX_BOX_SIZE + 1;
maxq = MIN2(c0, maxq);
for(q = minq; q < maxq; q++){
if(S_cons[q] != 3) continue;
c0 = mm + ggg[indx[q] + p] + n_seq * P->internal_loop[l1 + j - q - 1];
if(cij == c0){
i=p;j=q;
goto repeat_gquad;
}
}
}
/* gquad starting directly at i+1 */
p = i1;
if(S_cons[p] == 3){
if(p < j - VRNA_GQUAD_MIN_BOX_SIZE){
minq = j - i + p - MAXLOOP - 2;
c0 = p + VRNA_GQUAD_MIN_BOX_SIZE - 1;
minq = MAX2(c0, minq);
c0 = j - 3;
maxq = p + VRNA_GQUAD_MAX_BOX_SIZE + 1;
maxq = MIN2(c0, maxq);
for(q = minq; q < maxq; q++){
if(S_cons[q] != 3) continue;
if(cij == mm + ggg[indx[q] + p] + n_seq * P->internal_loop[j - q - 1]){
i = p; j=q;
goto repeat_gquad;
}
}
}
}
/* gquad ending directly at j-1 */
q = j1;
if(S_cons[q] == 3)
for(p = i1 + 3; p < j - VRNA_GQUAD_MIN_BOX_SIZE; p++){
l1 = p - i - 1;
if(l1>MAXLOOP) break;
if(S_cons[p] != 3) continue;
if(cij == mm + ggg[indx[q] + p] + n_seq * P->internal_loop[l1]){
i = p; j = q;
goto repeat_gquad;
}
}
}
/* multiloop decomposition of the interval enclosed by (i,j) */
mm = n_seq*P->MLclosing;
if(dangles){
for(ss = 0; ss < n_seq; ss++){
tt = rtype[type[ss]];
mm += E_MLstem(tt, S5[ss][j], S3[ss][i], P);
}
}
else{
for(ss = 0; ss < n_seq; ss++){
tt = rtype[type[ss]];
mm += E_MLstem(tt, -1, -1, P);
}
}
sector[s+1].ml = sector[s+2].ml = 1;
for (k = i1+TURN+1; k < j1-TURN-1; k++){
if(cij == fML[indx[k]+i1] + fML[indx[j1]+k+1] + mm) break;
}
if (k<=j-3-TURN) { /* found the decomposition */
sector[++s].i = i1;
sector[s].j = k;
sector[++s].i = k+1;
sector[s].j = j1;
} else {
nrerror("backtracking failed in repeat");
}
continue; /* this is a workarround to not accidentally proceed in the following block */
repeat_gquad:
/*
now we do some fancy stuff to backtrace the stacksize and linker lengths
of the g-quadruplex that should reside within position i,j
*/
{
int cnt1, cnt2, cnt3, cnt4, l[3], L, size;
size = j-i+1;
for(L=0; L < VRNA_GQUAD_MIN_STACK_SIZE;L++){
if(S_cons[i+L] != 3) break;
if(S_cons[j-L] != 3) break;
}
if(L == VRNA_GQUAD_MIN_STACK_SIZE){
/* continue only if minimum stack size starting from i is possible */
for(; L<=VRNA_GQUAD_MAX_STACK_SIZE;L++){
if(S_cons[i+L-1] != 3) break; /* break if no more consecutive G's 5' */
if(S_cons[j-L+1] != 3) break; /* break if no more consecutive G'1 3' */
for( l[0] = VRNA_GQUAD_MIN_LINKER_LENGTH;
(l[0] <= VRNA_GQUAD_MAX_LINKER_LENGTH)
&& (size - 4*L - 2*VRNA_GQUAD_MIN_LINKER_LENGTH - l[0] >= 0);
l[0]++){
/* check whether we find the second stretch of consecutive G's */
for(cnt1 = 0; (cnt1 < L) && (S_cons[i+L+l[0]+cnt1] == 3); cnt1++);
if(cnt1 < L) continue;
for( l[1] = VRNA_GQUAD_MIN_LINKER_LENGTH;
(l[1] <= VRNA_GQUAD_MAX_LINKER_LENGTH)
&& (size - 4*L - VRNA_GQUAD_MIN_LINKER_LENGTH - l[0] - l[1] >= 0);
l[1]++){
/* check whether we find the third stretch of consectutive G's */
for(cnt1 = 0; (cnt1 < L) && (S_cons[i+2*L+l[0]+l[1]+cnt1] == 3); cnt1++);
if(cnt1 < L) continue;
/*
the length of the third linker now depends on position j as well
as the other linker lengths... so we do not have to loop too much
*/
l[2] = size - 4*L - l[0] - l[1];
if(l[2] < VRNA_GQUAD_MIN_LINKER_LENGTH) break;
if(l[2] > VRNA_GQUAD_MAX_LINKER_LENGTH) continue;
/* check for contribution */
if(ggg[indx[j]+i] == E_gquad_ali(i, L, l, (const short **)S, n_seq, P)){
int a;
/* fill the G's of the quadruplex into base_pair2 */
for(a=0;a<L;a++){
base_pair2[++b].i = i+a;
base_pair2[b].j = i+a;
base_pair2[++b].i = i+L+l[0]+a;
base_pair2[b].j = i+L+l[0]+a;
base_pair2[++b].i = i+L+l[0]+L+l[1]+a;
base_pair2[b].j = i+L+l[0]+L+l[1]+a;
base_pair2[++b].i = i+L+l[0]+L+l[1]+L+l[2]+a;
base_pair2[b].j = i+L+l[0]+L+l[1]+L+l[2]+a;
}
goto repeat_gquad_exit;
}
}
}
}
}
nrerror("backtracking failed in repeat_gquad");
}
repeat_gquad_exit:
asm("nop");
}
/* fprintf(stderr, "covariance energy %6.2f\n", cov_en/100.); */
base_pair2[0].i = b; /* save the total number of base pairs */
free(type);
}
/* Encode one aligned sequence: S gets the numerical base codes (S[0] is
   the length), s5/s3 the encodings of the nearest non-gap neighbors 5'
   and 3' of each position, ss the gap-free character sequence, and as
   the map from alignment column to ungapped position. With `circular`
   set, the 5'/3' neighbors wrap around the sequence ends. */
PUBLIC void encode_ali_sequence(const char *sequence, short *S, short *s5, short *s3, char *ss, unsigned short *as, int circular){
unsigned int i,l;
unsigned short p;
l = strlen(sequence);
S[0] = (short) l;
s5[0] = s5[1] = 0;
/* make numerical encoding of sequence */
for(i=1; i<=l; i++){
short ctemp;
ctemp=(short) encode_char(toupper(sequence[i-1]));
S[i]= ctemp ;
}
if (oldAliEn){
/* use alignment sequences in all energy evaluations */
ss[0]=sequence[0];
for(i=1; i<l; i++){
s5[i] = S[i-1];
s3[i] = S[i+1];
ss[i] = sequence[i];
as[i] = i;
}
ss[l] = sequence[l];
as[l] = l;
s5[l] = S[l-1];
s3[l] = 0;
S[l+1] = S[1];
s5[1] = 0;
if (circular) {
s5[1] = S[l];
s3[l] = S[1];
/* NOTE(review): assigns a short encoding into the char array ss;
   looks suspicious -- confirm against upstream ViennaRNA sources. */
ss[l+1] = S[1];
}
}
else{
/* gaps are skipped: find the wrap-around 5'/3' neighbors first */
if(circular){
for(i=l; i>0; i--){
char c5;
c5 = sequence[i-1];
if ((c5=='-')||(c5=='_')||(c5=='~')||(c5=='.')) continue;
s5[1] = S[i];
break;
}
for (i=1; i<=l; i++) {
char c3;
c3 = sequence[i-1];
if ((c3=='-')||(c3=='_')||(c3=='~')||(c3=='.')) continue;
s3[l] = S[i];
break;
}
}
else s5[1]=s3[l]=0;
/* forward sweep: fill ss, the a2s map, and the 5' neighbors */
for(i=1,p=0; i<=l; i++){
char c5;
c5 = sequence[i-1];
if ((c5=='-')||(c5=='_')||(c5=='~')||(c5=='.'))
s5[i+1]=s5[i];
else { /* no gap */
ss[p++]=sequence[i-1]; /*start at 0!!*/
s5[i+1]=S[i];
}
as[i]=p;
}
/* backward sweep: propagate the 3' neighbors across gaps */
for (i=l; i>=1; i--) {
char c3;
c3 = sequence[i-1];
if ((c3=='-')||(c3=='_')||(c3=='~')||(c3=='.'))
s3[i-1]=s3[i];
else
s3[i-1]=S[i];
}
}
}
/*
 * Fill the global pscore[] matrix (indexed via indx[j]+i) with the
 * covariance score of every candidate pair (i,j) of the alignment:
 * a weighted sum over all sequence pairs of the distance matrix dm
 * (RIBOSUM, if `ribo` is set, otherwise a fixed Hamming-style matrix),
 * penalized by the number of sequences that cannot form the pair
 * (pfreq[0]) and by gap-gap columns (pfreq[7]).  Pairs that violate
 * the minimum loop size, have too many counterexamples, or conflict
 * with the constraint string get the sentinel score NONE.
 */
PRIVATE void make_pscores(const short *const* S, const char **AS,
int n_seq, const char *structure) {
/* calculate co-variance bonus for each pair depending on */
/* compensatory/consistent mutations and incompatible seqs */
/* should be 0 for conserved pairs, >0 for good pairs */
#define NONE -10000 /* score for forbidden pairs */
int n,i,j,k,l,s;
int olddm[7][7]={{0,0,0,0,0,0,0}, /* hamming distance between pairs */
{0,0,2,2,1,2,2} /* CG */,
{0,2,0,1,2,2,2} /* GC */,
{0,2,1,0,2,1,2} /* GU */,
{0,1,2,2,0,2,1} /* UG */,
{0,2,2,1,2,0,2} /* AU */,
{0,2,2,2,1,2,0} /* UA */};
float **dm;
n=S[0][0]; /* length of seqs */
/* choose the 7x7 pair-distance matrix */
if (ribo) {
if (RibosumFile !=NULL) dm=readribosum(RibosumFile);
else dm=get_ribosum(AS,n_seq,n);
}
else { /*use usual matrix*/
dm=(float **)space(7*sizeof(float*));
for (i=0; i<7;i++) {
dm[i]=(float *)space(7*sizeof(float));
for (j=0; j<7; j++)
dm[i][j] = (float) olddm[i][j];
}
}
/* score every candidate pair (i,j) with j-i > TURN */
for (i=1; i<n; i++) {
for (j=i+1; (j<i+TURN+1) && (j<=n); j++)
pscore[indx[j]+i] = NONE;
for (j=i+TURN+1; j<=n; j++) {
/* pfreq[t] counts sequences whose columns i,j form pair type t;
   t==0: cannot pair, t==7: gap-gap (or '~' padding) */
int pfreq[8]={0,0,0,0,0,0,0,0};
double score;
for (s=0; s<n_seq; s++) {
int type;
if (S[s][i]==0 && S[s][j]==0) type = 7; /* gap-gap */
else {
if ((AS[s][i] == '~')||(AS[s][j] == '~')) type = 7;
else type = pair[S[s][i]][S[s][j]];
}
pfreq[type]++;
}
/* too many non-pairing sequences: forbid this pair outright */
if (pfreq[0]*2+pfreq[7]>n_seq) { pscore[indx[j]+i] = NONE; continue;}
for (k=1,score=0; k<=6; k++) /* ignore pairtype 7 (gap-gap) */
for (l=k; l<=6; l++)
score += pfreq[k]*pfreq[l]*dm[k][l];
/* counter examples score -1, gap-gap scores -0.25 */
pscore[indx[j]+i] = cv_fact *
((UNIT*score)/n_seq - nc_fact*UNIT*(pfreq[0] + pfreq[7]*0.25));
}
}
if (noLonelyPairs) /* remove unwanted pairs */
/* walk outward along each diagonal; a pair with poorly scoring
   neighbors on both sides could only form an isolated pair */
for (k=1; k<n-TURN-1; k++)
for (l=1; l<=2; l++) {
int type,ntype=0,otype=0;
i=k; j = i+TURN+l;
type = pscore[indx[j]+i];
while ((i>=1)&&(j<=n)) {
if ((i>1)&&(j<n)) ntype = pscore[indx[j+1]+i-1];
if ((otype<cv_fact*MINPSCORE)&&(ntype<cv_fact*MINPSCORE)) /* too many counterexamples */
pscore[indx[j]+i] = NONE; /* i.j can only form isolated pairs */
otype = type;
type = ntype;
i--; j++;
}
}
/* apply structure constraints by forbidding conflicting pairs */
if (fold_constrained&&(structure!=NULL)) {
int psij, hx, hx2, *stack, *stack2;
stack = (int *) space(sizeof(int)*(n+1));
stack2 = (int *) space(sizeof(int)*(n+1));
for(hx=hx2=0, j=1; j<=n; j++) {
switch (structure[j-1]) {
case 'x': /* can't pair */
for (l=1; l<j-TURN; l++) pscore[indx[j]+l] = NONE;
for (l=j+TURN+1; l<=n; l++) pscore[indx[l]+j] = NONE;
break;
case '(':
stack[hx++]=j;
/* fallthrough */
case '[':
stack2[hx2++]=j;
/* fallthrough */
case '<': /* pairs upstream */
for (l=1; l<j-TURN; l++) pscore[indx[j]+l] = NONE;
break;
case ']':
if (hx2<=0) {
fprintf(stderr, "%s\n", structure);
nrerror("unbalanced brackets in constraints");
}
i = stack2[--hx2];
pscore[indx[j]+i]=NONE;
break;
case ')':
if (hx<=0) {
fprintf(stderr, "%s\n", structure);
nrerror("unbalanced brackets in constraints");
}
i = stack[--hx];
psij = pscore[indx[j]+i]; /* store for later */
/* forbid every pair crossing the enforced pair (i,j) ... */
for (k=j; k<=n; k++)
for (l=i; l<=j; l++)
pscore[indx[k]+l] = NONE;
for (l=i; l<=j; l++)
for (k=1; k<=i; k++)
pscore[indx[l]+k] = NONE;
for (k=i+1; k<j; k++)
pscore[indx[k]+i] = pscore[indx[j]+k] = NONE;
/* ... then restore (i,j) itself, clipped to be non-negative */
pscore[indx[j]+i] = (psij>0) ? psij : 0;
/* fallthrough */
case '>': /* pairs downstream */
for (l=j+TURN+1; l<=n; l++) pscore[indx[l]+j] = NONE;
break;
}
}
if (hx!=0) {
fprintf(stderr, "%s\n", structure);
nrerror("unbalanced brackets in constraint string");
}
free(stack); free(stack2);
}
/*free dm */
for (i=0; i<7;i++) {
free(dm[i]);
}
free(dm);
}
/*--------New scoring part-----------------------------------*/
/*
 * Mean pairwise identity of an alignment, in percent (rounded down).
 * The minimum pairwise identity (also in percent) is written to mini[0].
 * Columns are compared at indices 1..length, matching the 1-based
 * convention used by the other arrays in this file.
 */
PUBLIC int get_mpi(char *Alseq[], int n_seq, int length, int *mini) {
  int col, s1, s2;
  int matches_total = 0;   /* total matches, summed over all pairs  */
  int cols_total    = 0;   /* total column comparisons performed    */
  float min_frac    = 1.;  /* smallest per-pair identity fraction   */

  for (s1 = 0; s1 < n_seq - 1; s1++) {
    for (s2 = s1 + 1; s2 < n_seq; s2++) {
      float matches = 0;
      for (col = 1; col <= length; col++) {
        if (Alseq[s2][col] == Alseq[s1][col]) matches++;
        cols_total++;
      }
      if ((matches / length) < min_frac) min_frac = matches / (float)length;
      matches_total += matches;    /* matches is whole-numbered, no loss */
    }
  }

  mini[0] = (int)(min_frac * 100.);
  return (cols_total > 0) ? (int)(matches_total * 100 / cols_total) : 0;
}
/*
 * Read a 6x6 RIBOSUM-style scoring matrix from file `name` and return
 * it as a freshly allocated 7x7 matrix (row/column 0 unused).  Lines
 * starting with '#' are skipped; each remaining line must contain six
 * floats in the file order CG GC GU UG AU UA, which `translator` maps
 * onto the internal pair-type encoding.
 *
 * Fixes over the previous version: `line` returned by get_line() is
 * now freed on every path (comment lines and the parse-failure break
 * used to leak it), and NULL results from fopen()/get_line() are
 * handled instead of being dereferenced.
 */
PUBLIC float **readribosum(char *name){
float **dm;
char *line;
FILE *fp;
int i;
int who=0;
float a,b,c,d,e,f;
/* maps file column order (CG GC GU UG AU UA) to internal pair types */
int translator[7]={0,5,1,2,3,6,4};
fp=fopen(name,"r");
if (fp==NULL) nrerror("readribosum(): can't open ribosum file");
dm=(float **)space(7*sizeof(float*));
for (i=0; i<7;i++) {
dm[i]=(float *)space(7*sizeof(float));
}
while(1) { /*bisma hoit fertisch san*/
line=get_line(fp);
if (line==NULL) break;                 /* unexpected end of file */
if (*line=='#') { free(line); continue; } /* skip comment lines  */
i=sscanf(line,"%f %f %f %f %f %f",&a,&b,&c,&d,&e,&f);
free(line);                            /* values copied; done with line */
if (i==0) break;                       /* not a data line: stop parsing */
dm[translator[++who]][translator[1]]=a;
dm[translator[who]][translator[2]]=b;
dm[translator[who]][translator[3]]=c;
dm[translator[who]][translator[4]]=d;
dm[translator[who]][translator[5]]=e;
dm[translator[who]][translator[6]]=f;
if (who==6) break;                     /* all six rows read */
}
fclose(fp);
return dm;
}
/*
 * Correction term for g-quadruplexes inside the region [i,j] of the
 * consensus structure.  Because the plain loop-energy evaluation treats
 * the '+' g-quad positions as ordinary unpaired bases, this routine
 * re-scans the structure string for g-quads (parse_gquad), adds their
 * alignment energies (E_gquad_ali_en) and then subtracts/replaces the
 * loop contributions that were mis-evaluated, depending on how many
 * real stems (num_elem) share the enclosing loop:
 *   case 0 -> loop was scored as a hairpin, rescore as interior loop
 *   case 1 -> loop was scored as an interior loop, rescore as multiloop
 *   default -> g-quad bases were scored as multiloop unpaired bases
 * Results: en[0] = energy correction, en[1] = covariance correction.
 * Recurses into nested stems found 3' of a g-quad.
 */
PRIVATE void en_corr_of_loop_gquad(int i,
int j,
const char **sequences,
const char *structure,
short *pt,
int *loop_idx,
int n_seq,
int en[2]){
int pos, energy, en_covar, p, q, r, s, u, type, type2, gq_en[2];
int L, l[3];
energy = en_covar = 0;
q = i;
while((pos = parse_gquad(structure + q-1, &L, l)) > 0){
q += pos-1;
/* [p,q] spans 4 G-runs of length L separated by linkers l[0..2] */
p = q - 4*L - l[0] - l[1] - l[2] + 1;
if(q > j) break;
/* we've found the first g-quadruplex at position [p,q] */
E_gquad_ali_en(p, L, l, (const short **)S, n_seq, gq_en, P);
energy += gq_en[0];
en_covar += gq_en[1];
/* check if it's enclosed in a base pair */
if(loop_idx[p] == 0){ q++; continue; /* g-quad in exterior loop */}
else{
energy += E_MLstem(0, -1, -1, P) * n_seq;
/* find its enclosing pair */
int num_elem, num_g, elem_i, elem_j, up_mis;
num_elem = 0;
num_g = 1;
r = p - 1;
up_mis = q - p + 1;
/* seek for first pairing base located 5' of the g-quad */
for(r = p - 1; !pt[r] && (r >= i); r--);
if(r < i) nrerror("this should not happen");
if(r < pt[r]){ /* found the enclosing pair */
s = pt[r];
} else {
num_elem++;
elem_i = pt[r];
elem_j = r;
r = pt[r]-1 ;
/* seek for next pairing base 5' of r */
for(; !pt[r] && (r >= i); r--);
if(r < i) nrerror("so nich");
if(r < pt[r]){ /* found the enclosing pair */
s = pt[r];
} else {
/* hop over stems and unpaired nucleotides */
while((r > pt[r]) && (r >= i)){
if(pt[r]){ r = pt[r]; num_elem++;}
r--;
}
if(r < i) nrerror("so nich");
s = pt[r]; /* found the enclosing pair */
}
}
/* now we have the enclosing pair (r,s) */
u = q+1;
/* we know everything about the 5' part of this loop so check the 3' part */
while(u<s){
if(structure[u-1] == '.') u++;
else if (structure[u-1] == '+'){ /* found another gquad */
pos = parse_gquad(structure + u - 1, &L, l);
if(pos > 0){
E_gquad_ali_en(u, L, l, (const short **)S, n_seq, gq_en, P);
energy += gq_en[0] + E_MLstem(0, -1, -1, P) * n_seq;
en_covar += gq_en[1];
up_mis += pos;
u += pos;
num_g++;
}
} else { /* we must have found a stem */
if(!(u < pt[u])) nrerror("wtf!");
num_elem++;
elem_i = u;
elem_j = pt[u];
/* recursively correct the enclosed stem's own loops */
en_corr_of_loop_gquad(u,
pt[u],
sequences,
structure,
pt,
loop_idx,
n_seq,
gq_en);
energy += gq_en[0];
en_covar += gq_en[1];
u = pt[u] + 1;
}
}
if(u!=s) nrerror("what the ...");
else{ /* we are done since we've found no other 3' structure element */
switch(num_elem){
/* g-quad was misinterpreted as hairpin closed by (r,s) */
case 0: /*if(num_g == 1)
if((p-r-1 == 0) || (s-q-1 == 0))
nrerror("too few unpaired bases");
*/
{
int ee = 0;
int cnt;
/* subtract the hairpin energy that was wrongly added ... */
for(cnt=0;cnt<n_seq;cnt++){
type = pair[S[cnt][r]][S[cnt][s]];
if(type == 0) type = 7;
if ((a2s[cnt][s-1]-a2s[cnt][r])<3) ee+=600;
else ee += E_Hairpin( a2s[cnt][s-1] - a2s[cnt][r],
type,
S3[cnt][r],
S5[cnt][s],
Ss[cnt] + a2s[cnt][r-1],
P);
}
energy -= ee;
ee = 0;
/* ... and add terminal mismatch / AU-end terms for (r,s) */
for(cnt=0;cnt < n_seq; cnt++){
type = pair[S[cnt][r]][S[cnt][s]];
if(type == 0) type = 7;
if(dangles == 2)
ee += P->mismatchI[type][S3[cnt][r]][S5[cnt][s]];
if(type > 2)
ee += P->TerminalAU;
}
energy += ee;
}
energy += n_seq * P->internal_loop[s-r-1-up_mis];
break;
/* g-quad was misinterpreted as interior loop closed by (r,s) with enclosed pair (elem_i, elem_j) */
case 1: {
int ee = 0;
int cnt;
/* subtract the interior-loop energy that was wrongly added ... */
for(cnt = 0; cnt<n_seq;cnt++){
type = pair[S[cnt][r]][S[cnt][s]];
if(type == 0) type = 7;
type2 = pair[S[cnt][elem_j]][S[cnt][elem_i]];
if(type2 == 0) type2 = 7;
ee += E_IntLoop(a2s[cnt][elem_i-1] - a2s[cnt][r],
a2s[cnt][s-1] - a2s[cnt][elem_j],
type,
type2,
S3[cnt][r],
S5[cnt][s],
S5[cnt][elem_i],
S3[cnt][elem_j],
P);
}
energy -= ee;
ee = 0;
/* ... and add the two multiloop stem contributions instead */
for(cnt = 0; cnt < n_seq; cnt++){
type = pair[S[cnt][s]][S[cnt][r]];
if(type == 0) type = 7;
ee += E_MLstem(type, S5[cnt][s], S3[cnt][r], P);
type = pair[S[cnt][elem_i]][S[cnt][elem_j]];
if(type == 0) type = 7;
ee += E_MLstem(type, S5[cnt][elem_i], S3[cnt][elem_j], P);
}
energy += ee;
}
energy += (P->MLclosing + (elem_i-r-1+s-elem_j-1-up_mis) * P->MLbase) * n_seq;
break;
/* gquad was misinterpreted as unpaired nucleotides in a multiloop */
default: energy -= (up_mis) * P->MLbase * n_seq;
break;
}
}
q = s+1;
}
}
en[0] = energy;
en[1] = en_covar;
}
/*
 * Evaluate the energy of consensus structure `structure` (which may
 * contain g-quadruplexes written as runs of '+') for the alignment
 * `sequences`.  On return energy[0] holds the free energy and
 * energy[1] the covariance contribution, both divided by 100*n_seq
 * (i.e. kcal/mol averaged over the alignment).  Returns energy[0].
 *
 * The global folding arrays (S, S5, S3, Ss, a2s, indx, pscore) are
 * saved, replaced by alignment-specific versions, and restored before
 * returning, so an ongoing folding run is not disturbed.
 *
 * Cleanup: removed the unused locals `s` and `new` (the latter was
 * assigned but never read, and being a C++ keyword it also broke
 * compilation of this file as C++).
 */
PUBLIC float energy_of_ali_gquad_structure( const char **sequences,
                                            const char *structure,
                                            int n_seq,
                                            float *energy){
  unsigned int n;
  short *pt;
  short **tempS;
  short **tempS5;         /* S5[s][i] holds next base 5' of i in sequence s */
  short **tempS3;         /* Sl[s][i] holds next base 3' of i in sequence s */
  char **tempSs;
  unsigned short **tempa2s;
  int *tempindx, *loop_idx;
  int *temppscore;
  int en_struct[2], gge[2];

  if(sequences[0] != NULL){
    n = (unsigned int) strlen(sequences[0]);
    update_alifold_params();
    /* save old memory */
    tempS = S; tempS3 = S3; tempS5 = S5; tempSs = Ss; tempa2s = a2s;
    tempindx = indx; temppscore = pscore;
    alloc_sequence_arrays(sequences, &S, &S5, &S3, &a2s, &Ss, 0);
    pscore = (int *) space(sizeof(int)*((n+1)*(n+2)/2));
    indx = get_indx(n);
    make_pscores((const short *const*)S, sequences, n_seq, NULL);
    pt = make_pair_table(structure);
    /* base energy ignoring g-quads, then the g-quad correction */
    energy_of_alistruct_pt(sequences, pt, n_seq, &(en_struct[0]));
    loop_idx = make_loop_index_pt(pt);
    en_corr_of_loop_gquad(1, n, sequences, structure, pt, loop_idx, n_seq, gge);
    en_struct[0] += gge[0];
    en_struct[1] += gge[1];
    free(loop_idx);
    free(pt);
    energy[0] = (float)en_struct[0]/(float)(100*n_seq);
    energy[1] = (float)en_struct[1]/(float)(100*n_seq);
    free(pscore);
    free(indx);
    free_sequence_arrays(n_seq, &S, &S5, &S3, &a2s, &Ss);
    /* restore old memory */
    S = tempS; S3 = tempS3; S5 = tempS5; Ss = tempSs; a2s = tempa2s;
    indx = tempindx; pscore = temppscore;
  }
  else nrerror("energy_of_alistruct(): no sequences in alignment!");
  return energy[0];
}
/*
 * Evaluate the energy of consensus structure `structure` (no g-quad
 * support; see energy_of_ali_gquad_structure for that) for alignment
 * `sequences`.  On return energy[0] holds the free energy and
 * energy[1] the covariance contribution, both divided by 100*n_seq.
 * Returns energy[0].
 *
 * The global folding arrays are saved, replaced by alignment-specific
 * versions, and restored before returning.
 *
 * Cleanup: removed the unused locals `s` and `new` (the latter was
 * assigned but never read, and being a C++ keyword it also broke
 * compilation of this file as C++).
 */
PUBLIC float energy_of_alistruct(const char **sequences, const char *structure, int n_seq, float *energy){
  unsigned int n;
  short *pt;
  short **tempS;
  short **tempS5;         /* S5[s][i] holds next base 5' of i in sequence s */
  short **tempS3;         /* Sl[s][i] holds next base 3' of i in sequence s */
  char **tempSs;
  unsigned short **tempa2s;
  int *tempindx;
  int *temppscore;
  int en_struct[2];

  if(sequences[0] != NULL){
    n = (unsigned int) strlen(sequences[0]);
    update_alifold_params();
    /* save old memory */
    tempS = S; tempS3 = S3; tempS5 = S5; tempSs = Ss; tempa2s = a2s;
    tempindx = indx; temppscore = pscore;
    alloc_sequence_arrays(sequences, &S, &S5, &S3, &a2s, &Ss, 0);
    pscore = (int *) space(sizeof(int)*((n+1)*(n+2)/2));
    indx = get_indx(n);
    make_pscores((const short *const*)S, sequences, n_seq, NULL);
    pt = make_pair_table(structure);
    energy_of_alistruct_pt(sequences, pt, n_seq, &(en_struct[0]));
    free(pt);
    energy[0] = (float)en_struct[0]/(float)(100*n_seq);
    energy[1] = (float)en_struct[1]/(float)(100*n_seq);
    free(pscore);
    free(indx);
    free_sequence_arrays(n_seq, &S, &S5, &S3, &a2s, &Ss);
    /* restore old memory */
    S = tempS; S3 = tempS3; S5 = tempS5; Ss = tempSs; a2s = tempa2s;
    indx = tempindx; pscore = temppscore;
  }
  else nrerror("energy_of_alistruct(): no sequences in alignment!");
  return energy[0];
}
/*
 * Sum the energies of all structure components described by pair table
 * pt.  energy[0] accumulates the free energy (starting with the
 * exterior- or multiloop contribution, depending on backtrack_type),
 * energy[1] the covariance contribution.
 */
PRIVATE void energy_of_alistruct_pt(const char **sequences, short *pt, int n_seq, int *energy){
  int pos;
  const int length = S[0][0];

  /* contribution of the outermost loop */
  if (backtrack_type == 'M')
    energy[0] = ML_Energy_pt(0, n_seq, pt);
  else
    energy[0] = EL_Energy_pt(0, n_seq, pt);
  energy[1] = 0;

  /* walk along the backbone; each paired position starts a helix whose
     whole substructure is handled by stack_energy_pt */
  pos = 1;
  while (pos <= length) {
    if (pt[pos] != 0) {
      stack_energy_pt(pos, sequences, pt, n_seq, energy);
      pos = pt[pos];   /* jump to the partner; the increment below skips it */
    }
    pos++;
  }
}
/*
 * Recursively score the substructure enclosed by the pair (i, pt[i]).
 * Walks inward over stacked pairs / interior loops, then dispatches to
 * hairpin or multiloop handling.  energy[0] accumulates loop energies
 * (summed over all n_seq sequences), energy[1] the covariance scores
 * from the global pscore[] table.
 */
PRIVATE void stack_energy_pt(int i, const char **sequences, short *pt, int n_seq, int *energy)
{
/* calculate energy of substructure enclosed by (i,j) */
int ee= 0;
int j, p, q, s;
/* per-sequence pair type of the current closing pair (i,j) */
int *type = (int *) space(n_seq*sizeof(int));
j = pt[i];
for (s=0; s<n_seq; s++) {
type[s] = pair[S[s][i]][S[s][j]];
if (type[s]==0) {
type[s]=7;
}
}
p=i; q=j;
while (p<q) { /* process all stacks and interior loops */
int type_2;
/* find the next pair (p,q) inside (i,j) */
while (pt[++p]==0);
while (pt[--q]==0);
if ((pt[q]!=(short)p)||(p>q)) break;
ee=0;
for (s=0; s<n_seq; s++) {
type_2 = pair[S[s][q]][S[s][p]];
if (type_2==0) {
type_2=7;
}
/* loop sizes use a2s[] so gaps are not counted as unpaired bases */
ee += E_IntLoop(a2s[s][p-1]-a2s[s][i], a2s[s][j-1]-a2s[s][q], type[s], type_2, S3[s][i], S5[s][j], S5[s][p], S3[s][q], P);
}
energy[0] += ee;
energy[1] += pscore[indx[j]+i];
/* descend: (p,q) becomes the new closing pair */
i=p; j=q;
for (s=0; s<n_seq; s++) {
type[s] = pair[S[s][i]][S[s][j]];
if (type[s]==0) type[s]=7;
}
} /* end while */
/* p,q don't pair must have found hairpin or multiloop */
if (p>q) {
ee=0;/* hair pin */
for (s=0; s< n_seq; s++) {
/* loops shorter than 3 (after gap removal) get a fixed 600 penalty */
if ((a2s[s][j-1]-a2s[s][i])<3) ee+=600;
else ee += E_Hairpin(a2s[s][j-1]-a2s[s][i], type[s], S3[s][i], S5[s][j], Ss[s]+(a2s[s][i-1]), P);
}
energy[0] += ee;
energy[1] += pscore[indx[j]+i];
free(type);
return;
}
/* (i,j) is exterior pair of multiloop */
energy[1] += pscore[indx[j]+i];
while (p<j) {
/* add up the contributions of the substructures of the ML */
stack_energy_pt(p, sequences, pt, n_seq, energy);
p = pt[p];
/* search for next base pair in multiloop */
while (pt[++p]==0);
}
energy[0] += ML_Energy_pt(i, n_seq, pt);
free(type);
}
/*
 * Energy of the multiloop closed by pair (i, pt[i]), summed over all
 * n_seq sequences: one E_MLstem term per enclosed stem (with dangles
 * taken from the gap-aware S5/S3 arrays), MLbase times the number of
 * unpaired positions, and — for i > 0 — the closing pair's MLclosing
 * and stem terms.  With i == 0 it scores the exterior loop in
 * multiloop mode (see backtrack_type in energy_of_alistruct_pt).
 */
PRIVATE int ML_Energy_pt(int i, int n_seq, short *pt){
/* i is the 5'-base of the closing pair */
int energy, tt, i1, j, p, q, u, s;
short d5, d3;
j = pt[i];
i1 = i;
p = i+1;
u = 0;
energy = 0;
do{ /* walk around the multi-loop */
/* hop over unpaired positions */
while (p < j && pt[p]==0) p++;
if(p >= j) break;
/* get position of pairing partner */
q = pt[p];
/* memorize number of unpaired positions? no, we just approximate here */
u += p-i1-1;
for (s=0; s< n_seq; s++) {
/* get type of base pair P->q */
tt = pair[S[s][p]][S[s][q]];
if (tt==0) tt=7;
/* NOTE(review): the (tt!=0) test is dead — tt was just forced to 7 */
d5 = dangles && (a2s[s][p]>1) && (tt!=0) ? S5[s][p] : -1;
d3 = dangles && (a2s[s][q]<a2s[s][S[0][0]]) ? S3[s][q] : -1;
energy += E_MLstem(tt, d5, d3, P);
}
i1 = q;
p = q + 1;
}while(1);
if(i > 0){
/* closing pair contribution (skipped for the exterior-loop case) */
energy += P->MLclosing * n_seq;
if(dangles){
for (s=0; s<n_seq; s++){
tt = pair[S[s][j]][S[s][i]];
if (tt==0) tt=7;
energy += E_MLstem(tt, S5[s][j], S3[s][i], P);
}
}
else{
for (s=0; s<n_seq; s++){
tt = pair[S[s][j]][S[s][i]];
if (tt==0) tt=7;
energy += E_MLstem(tt, -1, -1, P);
}
}
}
/* unpaired stretch between the last stem and the closing pair */
u += j - i1 - 1;
energy += u * P->MLbase * n_seq;
return energy;
}
/*
 * Energy of the exterior loop starting at position i+1, summed over
 * all n_seq sequences: one E_ExtLoop term per stem found along the
 * backbone up to pt[0] (the sequence length), with dangle ends taken
 * from the gap-aware S5/S3 arrays.  Unpaired exterior bases add no
 * energy.
 */
PRIVATE int EL_Energy_pt(int i, int n_seq, short *pt){
/* NOTE(review): i1 is unused here */
int energy, tt, i1, j, p, q, s;
short d5, d3;
j = pt[0];
p = i+1;
energy = 0;
do{ /* walk along the backbone */
/* hop over unpaired positions */
while (p < j && pt[p]==0) p++;
if(p == j) break; /* no more stems */
/* get position of pairing partner */
q = pt[p];
for (s=0; s< n_seq; s++) {
/* get type of base pair P->q */
tt = pair[S[s][p]][S[s][q]];
if (tt==0) tt=7;
/* NOTE(review): the (tt!=0) test is dead — tt was just forced to 7 */
d5 = dangles && (a2s[s][p]>1) && (tt!=0) ? S5[s][p] : -1;
d3 = dangles && (a2s[s][q]<a2s[s][S[0][0]]) ? S3[s][q] : -1;
energy += E_ExtLoop(tt, d5, d3, P);
}
p = q + 1;
}while(p < j);
return energy;
}
|
csr_matop.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matrix operation functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "csr_matrix.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAddFirstPass:
*
* Performs the first pass needed for Matrix/Matrix addition (C = A + B).
* This function:
* 1) Computes the row pointer of the resulting matrix C_i
* 2) Allocates memory for the matrix C and returns it to the user
*
* Notes: 1) It can be used safely inside OpenMP parallel regions.
* 2) firstrow, lastrow and marker are private variables.
* 3) The remaining arguments are shared variables.
* 4) twspace (thread workspace) must be allocated outside the
* parallel region.
* 5) The mapping arrays map_A2C and map_B2C are used when adding
* off-diagonal matrices. They can be set to NULL pointer when
* adding diagonal matrices.
* 6) Assumes that the elements of C_i are initialized to zero.
*--------------------------------------------------------------------------*/
/*
 * First pass of C = alpha*A + beta*B (see header comment above):
 * counts the nonzeros of each of this thread's rows into C_i, turns
 * the per-thread counts into global offsets via the shared twspace
 * prefix sum, and lets thread 0 create and initialize *C_ptr.
 * Runs inside an OpenMP parallel region; the barriers below separate
 * the counting, offset-correction, and rownnz fix-up phases.
 */
HYPRE_Int
hypre_CSRMatrixAddFirstPass( HYPRE_Int firstrow,
HYPRE_Int lastrow,
HYPRE_Int *twspace,
HYPRE_Int *marker,
HYPRE_Int *map_A2C,
HYPRE_Int *map_B2C,
hypre_CSRMatrix *A,
hypre_CSRMatrix *B,
HYPRE_Int nrows_C,
HYPRE_Int nnzrows_C,
HYPRE_Int ncols_C,
HYPRE_Int *rownnz_C,
HYPRE_MemoryLocation memory_location_C,
HYPRE_Int *C_i,
hypre_CSRMatrix **C_ptr )
{
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int *B_i = hypre_CSRMatrixI(B);
HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
HYPRE_Int i, ia, ib, ic, iic, ii, i1;
HYPRE_Int jcol, jj;
HYPRE_Int num_threads = hypre_NumActiveThreads();
HYPRE_Int num_nonzeros;
/* Initialize marker array */
for (i = 0; i < ncols_C; i++)
{
marker[i] = -1;
}
ii = hypre_GetThreadNum();
/* Count nonzeros of each owned row; marker[jcol] == iic marks columns
   already seen in row iic, so common entries of A and B count once */
num_nonzeros = 0;
for (ic = firstrow; ic < lastrow; ic++)
{
iic = rownnz_C ? rownnz_C[ic] : ic;
if (map_A2C)
{
for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
{
jcol = map_A2C[A_j[ia]];
marker[jcol] = iic;
num_nonzeros++;
}
}
else
{
for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
{
jcol = A_j[ia];
marker[jcol] = iic;
num_nonzeros++;
}
}
if (map_B2C)
{
for (ib = B_i[iic]; ib < B_i[iic+1]; ib++)
{
jcol = map_B2C[B_j[ib]];
if (marker[jcol] != iic)
{
marker[jcol] = iic;
num_nonzeros++;
}
}
}
else
{
for (ib = B_i[iic]; ib < B_i[iic+1]; ib++)
{
jcol = B_j[ib];
if (marker[jcol] != iic)
{
marker[jcol] = iic;
num_nonzeros++;
}
}
}
/* running (thread-local) prefix sum for now */
C_i[iic+1] = num_nonzeros;
}
twspace[ii] = num_nonzeros;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Correct C_i - phase 1 */
/* shift this thread's rows by the total of all lower-ranked threads */
if (ii)
{
jj = twspace[0];
for (i1 = 1; i1 < ii; i1++)
{
jj += twspace[i1];
}
for (ic = firstrow; ic < lastrow; ic++)
{
iic = rownnz_C ? rownnz_C[ic] : ic;
C_i[iic+1] += jj;
}
}
else
{
/* thread 0: compute the global nonzero count and create C */
num_nonzeros = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
num_nonzeros += twspace[i1];
}
*C_ptr = hypre_CSRMatrixCreate(nrows_C, ncols_C, num_nonzeros);
hypre_CSRMatrixI(*C_ptr) = C_i;
hypre_CSRMatrixRownnz(*C_ptr) = rownnz_C;
hypre_CSRMatrixNumRownnz(*C_ptr) = nnzrows_C;
hypre_CSRMatrixInitialize_v2(*C_ptr, 0, memory_location_C);
}
/* Correct C_i - phase 2 */
/* with a rownnz list, rows between listed (nonzero) rows are empty:
   copy the previous listed row's end offset into their slots */
if (rownnz_C != NULL)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (ic = firstrow; ic < (lastrow-1); ic++)
{
for (iic = rownnz_C[ic] + 1; iic < rownnz_C[ic+1]; iic++)
{
hypre_assert(C_i[iic+1] == 0);
C_i[iic+1] = C_i[rownnz_C[ic]+1];
}
}
if (ii < (num_threads - 1))
{
for (iic = rownnz_C[lastrow-1] + 1; iic < rownnz_C[lastrow]; iic++)
{
hypre_assert(C_i[iic+1] == 0);
C_i[iic+1] = C_i[rownnz_C[lastrow-1]+1];
}
}
else
{
/* last thread also fills the tail rows after its last nonzero row */
for (iic = rownnz_C[lastrow-1] + 1; iic < nrows_C; iic++)
{
hypre_assert(C_i[iic+1] == 0);
C_i[iic+1] = C_i[rownnz_C[lastrow-1]+1];
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
#ifdef HYPRE_DEBUG
if (!ii)
{
for (i = 0; i < nrows_C; i++)
{
hypre_assert(C_i[i] <= C_i[i+1]);
hypre_assert(((A_i[i+1] - A_i[i]) +
(B_i[i+1] - B_i[i])) >=
(C_i[i+1] - C_i[i]));
hypre_assert((C_i[i+1] - C_i[i]) >= (A_i[i+1] - A_i[i]));
hypre_assert((C_i[i+1] - C_i[i]) >= (B_i[i+1] - B_i[i]));
}
hypre_assert((C_i[nrows_C] - C_i[0]) == num_nonzeros);
}
#endif
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAddSecondPass:
*
* Performs the second pass needed for Matrix/Matrix addition (C = A + B).
* This function computes C_j and C_data.
*
* Notes: see notes for hypre_CSRMatrixAddFirstPass
*--------------------------------------------------------------------------*/
/*
 * Second pass of C = alpha*A + beta*B (see notes in the comment block
 * above): using the row pointer C_i produced by the first pass, fill
 * C_j and C_data for this thread's rows [firstrow, lastrow).  For each
 * row, A's entries are copied scaled by alpha; B's entries are either
 * appended (new column) or accumulated into the matching A entry,
 * using marker[] to remember where each column of the current row
 * lives in C_j/C_data.
 *
 * Bug fix: nnzs_B was read from matrix A (copy-paste error), which
 * made the map_A2C/map_B2C consistency assertion below vacuous or
 * wrongly triggered; it now reads hypre_CSRMatrixNumNonzeros(B).
 */
HYPRE_Int
hypre_CSRMatrixAddSecondPass( HYPRE_Int firstrow,
                              HYPRE_Int lastrow,
                              HYPRE_Int *twspace,
                              HYPRE_Int *marker,
                              HYPRE_Int *map_A2C,
                              HYPRE_Int *map_B2C,
                              HYPRE_Int *rownnz_C,
                              HYPRE_Complex alpha,
                              HYPRE_Complex beta,
                              hypre_CSRMatrix *A,
                              hypre_CSRMatrix *B,
                              hypre_CSRMatrix *C )
{
   HYPRE_Int     *A_i    = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j    = hypre_CSRMatrixJ(A);
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int      nnzs_A = hypre_CSRMatrixNumNonzeros(A);

   HYPRE_Int     *B_i    = hypre_CSRMatrixI(B);
   HYPRE_Int     *B_j    = hypre_CSRMatrixJ(B);
   HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
   HYPRE_Int      nnzs_B = hypre_CSRMatrixNumNonzeros(B); /* was (A): bug */

   HYPRE_Int     *C_i     = hypre_CSRMatrixI(C);
   HYPRE_Int     *C_j     = hypre_CSRMatrixJ(C);
   HYPRE_Complex *C_data  = hypre_CSRMatrixData(C);
   HYPRE_Int      ncols_C = hypre_CSRMatrixNumCols(C);

   HYPRE_Int      ia, ib, ic, iic;
   HYPRE_Int      jcol, pos;

   /* The column maps must be used consistently: either both, neither,
      or only for the matrix that actually has nonzeros */
   hypre_assert(( map_A2C &&  map_B2C) ||
                (!map_A2C && !map_B2C) ||
                ( map_A2C && (nnzs_B == 0)) ||
                ( map_B2C && (nnzs_A == 0)));

   /* Initialize marker vector */
   for (ia = 0; ia < ncols_C; ia++)
   {
      marker[ia] = -1;
   }

   /* start writing at the beginning of this thread's first owned row */
   pos = C_i[rownnz_C ? rownnz_C[firstrow] : firstrow];
   if ((map_A2C && map_B2C) || ( map_A2C && (nnzs_B == 0)) || ( map_B2C && (nnzs_A == 0)))
   {
      /* off-diagonal case: translate local column indices through the maps */
      for (ic = firstrow; ic < lastrow; ic++)
      {
         iic = rownnz_C ? rownnz_C[ic] : ic;

         /* A's entries: always new columns within this row */
         for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
         {
            jcol = map_A2C[A_j[ia]];
            C_j[pos] = jcol;
            C_data[pos] = alpha*A_data[ia];
            marker[jcol] = pos;     /* remember where this column sits */
            pos++;
         }

         /* B's entries: append new columns, accumulate into existing ones */
         for (ib = B_i[iic]; ib < B_i[iic+1]; ib++)
         {
            jcol = map_B2C[B_j[ib]];
            if (marker[jcol] < C_i[iic])
            {
               /* marker entry predates this row: column not present yet */
               C_j[pos] = jcol;
               C_data[pos] = beta*B_data[ib];
               marker[jcol] = pos;
               pos++;
            }
            else
            {
               hypre_assert(C_j[marker[jcol]] == jcol);
               C_data[marker[jcol]] += beta*B_data[ib];
            }
         }
         hypre_assert(pos == C_i[iic+1]);
      } /* end for loop */
   }
   else
   {
      /* diagonal case: column indices are used as-is */
      for (ic = firstrow; ic < lastrow; ic++)
      {
         iic = rownnz_C ? rownnz_C[ic] : ic;

         for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
         {
            jcol = A_j[ia];
            C_j[pos] = jcol;
            C_data[pos] = alpha*A_data[ia];
            marker[jcol] = pos;
            pos++;
         }

         for (ib = B_i[iic]; ib < B_i[iic+1]; ib++)
         {
            jcol = B_j[ib];
            if (marker[jcol] < C_i[iic])
            {
               C_j[pos] = jcol;
               C_data[pos] = beta*B_data[ib];
               marker[jcol] = pos;
               pos++;
            }
            else
            {
               hypre_assert(C_j[marker[jcol]] == jcol);
               C_data[marker[jcol]] += beta*B_data[ib];
            }
         }
         hypre_assert(pos == C_i[iic+1]);
      } /* end for loop */
   }
   hypre_assert(pos == C_i[rownnz_C ? rownnz_C[lastrow-1] + 1 : lastrow]);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAdd:
*
* Adds two CSR Matrices A and B and returns a CSR Matrix C;
*
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*
* This function is ready to compute C = alpha*A + beta*B if needed.
*--------------------------------------------------------------------------*/
/*
 * Host (CPU) implementation of C = A + B for CSR matrices.  Builds the
 * rownnz list of C (merge of A's and B's when both are sparse in
 * rows), then runs the two-pass add inside one OpenMP parallel region;
 * the passes synchronize internally via barriers.  Returns NULL on
 * incompatible dimensions.
 */
hypre_CSRMatrix*
hypre_CSRMatrixAddHost ( hypre_CSRMatrix *A,
hypre_CSRMatrix *B )
{
/* CSRMatrix A */
HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A);
HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A);
HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
/* CSRMatrix B */
HYPRE_Int *rownnz_B = hypre_CSRMatrixRownnz(B);
HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B);
HYPRE_Int nnzrows_B = hypre_CSRMatrixNumRownnz(B);
HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B);
/* CSRMatrix C */
hypre_CSRMatrix *C;
HYPRE_Int *C_i;
HYPRE_Int *rownnz_C;
HYPRE_Int nnzrows_C;
HYPRE_Int *twspace;
/* this routine computes plain A + B; the passes support alpha/beta */
HYPRE_Complex alpha = 1.0;
HYPRE_Complex beta = 1.0;
HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
if (nrows_A != nrows_B || ncols_A != ncols_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
return NULL;
}
/* Allocate memory */
/* twspace: one slot per thread for the prefix-sum of nonzero counts */
twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C);
/* Set nonzero rows data of diag_C */
nnzrows_C = nrows_A;
if ((nnzrows_A < nrows_A) && (nnzrows_B < nrows_B))
{
/* both matrices track nonzero rows: C's nonzero rows are the union */
hypre_MergeOrderedArrays(nnzrows_A, rownnz_A,
nnzrows_B, rownnz_B,
&nnzrows_C, &rownnz_C);
}
else
{
rownnz_C = NULL;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int ns, ne;
HYPRE_Int *marker = NULL;
/* each thread owns the contiguous slice [ns, ne) of C's nonzero rows */
hypre_partition1D(nnzrows_C, hypre_NumActiveThreads(), hypre_GetThreadNum(), &ns, &ne);
marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
/* pass 1 computes C_i and (on thread 0) creates C; pass 2 fills
   C_j/C_data; the barriers needed in between live inside the passes */
hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker, NULL, NULL,
A, B, nrows_A, nnzrows_C, ncols_A, rownnz_C,
memory_location_C, C_i, &C);
hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker, NULL, NULL,
rownnz_C, alpha, beta, A, B, C);
hypre_TFree(marker, HYPRE_MEMORY_HOST);
} /* end of parallel region */
/* Free memory */
hypre_TFree(twspace, HYPRE_MEMORY_HOST);
return C;
}
/*
 * C = A + B: dispatch to the device implementation when both operands
 * resolve to device execution (CUDA/HIP builds), otherwise to the host
 * implementation.
 */
hypre_CSRMatrix*
hypre_CSRMatrixAdd( hypre_CSRMatrix *A,
                    hypre_CSRMatrix *B)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A),
                                                      hypre_CSRMatrixMemoryLocation(B) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_CSRMatrixAddDevice(A, B);
   }
#endif

   return hypre_CSRMatrixAddHost(A, B);
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixBigAdd:
*
* Adds two CSR Matrices A and B with column indices stored as HYPRE_BigInt
* and returns a CSR Matrix C;
*
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
/*
 * C = A + B for CSR matrices whose column indices are stored as
 * HYPRE_BigInt (see comment block above).  Same two-pass scheme as
 * hypre_CSRMatrixAddHost, but inlined here and without rownnz
 * handling.
 *
 * Bug fixes:
 *  1) the row-pointer correction summed twspace[ia] with a stale `ia`
 *     (garbage/out-of-range read) instead of the loop index `ic`, so
 *     threads with rank >= 2 used wrong row offsets;
 *  2) the OpenMP barrier between the row-pointer correction (where
 *     thread 0 creates C and sets the shared C_j/C_data pointers) and
 *     the second pass was missing, letting other threads read
 *     C_j/C_data before they were assigned.
 */
hypre_CSRMatrix *
hypre_CSRMatrixBigAdd( hypre_CSRMatrix *A,
                       hypre_CSRMatrix *B )
{
   HYPRE_Complex    *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i    = hypre_CSRMatrixI(A);
   HYPRE_BigInt     *A_j    = hypre_CSRMatrixBigJ(A);
   HYPRE_Int         nrows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i    = hypre_CSRMatrixI(B);
   HYPRE_BigInt     *B_j    = hypre_CSRMatrixBigJ(B);
   HYPRE_Int         nrows_B = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_BigInt     *C_j;
   HYPRE_Int        *twspace;

   HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
   hypre_assert(memory_location_A == memory_location_B);
   */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   if (nrows_A != nrows_B || ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* Allocate memory */
   twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int     ia, ib, ic, num_nonzeros;
      HYPRE_Int     ns, ne, pos;
      HYPRE_BigInt  jcol;
      HYPRE_Int     ii, num_threads;
      HYPRE_Int     jj;
      HYPRE_Int    *marker = NULL;

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      hypre_partition1D(nrows_A, num_threads, ii, &ns, &ne);

      marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
      for (ia = 0; ia < ncols_A; ia++)
      {
         marker[ia] = -1;
      }

      /* First pass: count row sizes into C_i (thread-local offsets);
         marker[jcol] == ic flags columns already seen in row ic */
      num_nonzeros = 0;
      for (ic = ns; ic < ne; ic++)
      {
         C_i[ic] = num_nonzeros;
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            jcol = A_j[ia];
            marker[jcol] = ic;
            num_nonzeros++;
         }
         for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
         {
            jcol = B_j[ib];
            if (marker[jcol] != ic)
            {
               marker[jcol] = ic;
               num_nonzeros++;
            }
         }
         C_i[ic+1] = num_nonzeros;
      }
      twspace[ii] = num_nonzeros;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Correct row pointer: shift by the totals of lower-ranked threads */
      if (ii)
      {
         jj = twspace[0];
         for (ic = 1; ic < ii; ic++)
         {
            jj += twspace[ic];   /* bug fix: was twspace[ia] (stale index) */
         }

         for (ic = ns; ic < ne; ic++)
         {
            C_i[ic] += jj;
         }
      }
      else
      {
         /* thread 0: global nonzero count, create C, publish C_j/C_data */
         C_i[nrows_A] = 0;
         for (ic = 0; ic < num_threads; ic++)
         {
            C_i[nrows_A] += twspace[ic];
         }

         C = hypre_CSRMatrixCreate(nrows_A, ncols_A, C_i[nrows_A]);
         hypre_CSRMatrixI(C) = C_i;
         hypre_CSRMatrixInitialize_v2(C, 1, memory_location_C);
         C_j = hypre_CSRMatrixBigJ(C);
         C_data = hypre_CSRMatrixData(C);
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier   /* bug fix: C_j/C_data must be set before pass 2 */
#endif

      /* Second pass: fill C_j and C_data */
      for (ia = 0; ia < ncols_A; ia++)
      {
         marker[ia] = -1;
      }

      pos = C_i[ns];
      for (ic = ns; ic < ne; ic++)
      {
         /* A's entries first */
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            jcol = A_j[ia];
            C_j[pos] = jcol;
            C_data[pos] = A_data[ia];
            marker[jcol] = pos;
            pos++;
         }

         /* B's entries: append new columns or accumulate into A's */
         for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
         {
            jcol = B_j[ib];
            if (marker[jcol] < C_i[ic])
            {
               C_j[pos] = jcol;
               C_data[pos] = B_data[ib];
               marker[jcol] = pos;
               pos++;
            }
            else
            {
               C_data[marker[jcol]] += B_data[ib];
            }
         }
      }
      hypre_TFree(marker, HYPRE_MEMORY_HOST);
   } /* end of parallel region */

   /* Free memory */
   hypre_TFree(twspace, HYPRE_MEMORY_HOST);

   return C;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMultiplyHost
*
* Multiplies two CSR Matrices A and B and returns a CSR Matrix C;
*
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix*
hypre_CSRMatrixMultiplyHost( hypre_CSRMatrix *A,
hypre_CSRMatrix *B )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A);
HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A);
HYPRE_Int num_nnz_A = hypre_CSRMatrixNumNonzeros(A);
HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
HYPRE_Int *B_i = hypre_CSRMatrixI(B);
HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B);
HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B);
HYPRE_Int num_nnz_B = hypre_CSRMatrixNumNonzeros(B);
HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);
hypre_CSRMatrix *C;
HYPRE_Complex *C_data;
HYPRE_Int *C_i;
HYPRE_Int *C_j;
HYPRE_Int ia, ib, ic, ja, jb, num_nonzeros;
HYPRE_Int counter;
HYPRE_Complex a_entry, b_entry;
HYPRE_Int allsquare = 0;
HYPRE_Int *twspace;
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
if (ncols_A != nrows_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
return NULL;
}
if (nrows_A == ncols_B)
{
allsquare = 1;
}
if ((num_nnz_A == 0) || (num_nnz_B == 0))
{
C = hypre_CSRMatrixCreate(nrows_A, ncols_B, 0);
hypre_CSRMatrixNumRownnz(C) = 0;
hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
return C;
}
/* Allocate memory */
twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ia, ib, ic, ja, jb, num_nonzeros, counter, a_entry, b_entry)
#endif
{
HYPRE_Int *B_marker = NULL;
HYPRE_Int ns, ne, ii, jj;
HYPRE_Int num_threads;
HYPRE_Int i1, iic;
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne);
B_marker = hypre_CTAlloc(HYPRE_Int, ncols_B, HYPRE_MEMORY_HOST);
for (ib = 0; ib < ncols_B; ib++)
{
B_marker[ib] = -1;
}
/* First pass: compute sizes of C rows. */
num_nonzeros = 0;
for (ic = ns; ic < ne; ic++)
{
if (rownnz_A)
{
iic = rownnz_A[ic];
C_i[iic] = num_nonzeros;
}
else
{
iic = ic;
C_i[iic] = num_nonzeros;
if (allsquare)
{
B_marker[iic] = iic;
num_nonzeros++;
}
}
for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
{
ja = A_j[ia];
for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
{
jb = B_j[ib];
if (B_marker[jb] != iic)
{
B_marker[jb] = iic;
num_nonzeros++;
}
}
}
}
twspace[ii] = num_nonzeros;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Correct C_i - phase 1 */
if (ii)
{
jj = twspace[0];
for (i1 = 1; i1 < ii; i1++)
{
jj += twspace[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
iic = rownnz_A ? rownnz_A[i1] : i1;
C_i[iic] += jj;
}
}
else
{
C_i[nrows_A] = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
C_i[nrows_A] += twspace[i1];
}
C = hypre_CSRMatrixCreate(nrows_A, ncols_B, C_i[nrows_A]);
hypre_CSRMatrixI(C) = C_i;
hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
C_j = hypre_CSRMatrixJ(C);
C_data = hypre_CSRMatrixData(C);
}
/* Correct C_i - phase 2 */
if (rownnz_A != NULL)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (ic = ns; ic < (ne-1); ic++)
{
for (iic = rownnz_A[ic] + 1; iic < rownnz_A[ic+1]; iic++)
{
C_i[iic] = C_i[rownnz_A[ic+1]];
}
}
if (ii < (num_threads - 1))
{
for (iic = rownnz_A[ne-1] + 1; iic < rownnz_A[ne]; iic++)
{
C_i[iic] = C_i[rownnz_A[ne]];
}
}
else
{
for (iic = rownnz_A[ne-1] + 1; iic < nrows_A; iic++)
{
C_i[iic] = C_i[nrows_A];
}
}
}
/* End of First Pass */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Second pass: Fill in C_data and C_j. */
for (ib = 0; ib < ncols_B; ib++)
{
B_marker[ib] = -1;
}
counter = rownnz_A ? C_i[rownnz_A[ns]] : C_i[ns];
for (ic = ns; ic < ne; ic++)
{
if (rownnz_A)
{
iic = rownnz_A[ic];
}
else
{
iic = ic;
if (allsquare)
{
B_marker[ic] = counter;
C_data[counter] = 0;
C_j[counter] = ic;
counter++;
}
}
for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
{
ja = A_j[ia];
a_entry = A_data[ia];
for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
{
jb = B_j[ib];
b_entry = B_data[ib];
if (B_marker[jb] < C_i[iic])
{
B_marker[jb] = counter;
C_j[B_marker[jb]] = jb;
C_data[B_marker[jb]] = a_entry*b_entry;
counter++;
}
else
{
C_data[B_marker[jb]] += a_entry*b_entry;
}
}
}
}
/* End of Second Pass */
hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
} /*end parallel region */
#ifdef HYPRE_DEBUG
for (ic = 0; ic < nrows_A; ic++)
{
hypre_assert(C_i[ic] <= C_i[ic+1]);
}
#endif
// Set rownnz and num_rownnz
hypre_CSRMatrixSetRownnz(C);
/* Free memory */
hypre_TFree(twspace, HYPRE_MEMORY_HOST);
return C;
}
/* Dispatch C = A*B to the device or host kernel, depending on where
 * the operands' data lives. */
hypre_CSRMatrix*
hypre_CSRMatrixMultiply( hypre_CSRMatrix *A,
                         hypre_CSRMatrix *B)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if ( hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A),
                              hypre_CSRMatrixMemoryLocation(B) ) == HYPRE_EXEC_DEVICE )
   {
      return hypre_CSRMatrixMultiplyDevice(A, B);
   }
#endif

   return hypre_CSRMatrixMultiplyHost(A, B);
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixDeleteZeros
 *
 * Builds and returns a copy of A with every entry whose magnitude is
 * <= tol dropped.  Returns NULL when no entry qualifies, signalling to
 * the caller that A can be used unchanged.
 *--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixDeleteZeros( hypre_CSRMatrix *A,
                            HYPRE_Real tol )
{
   HYPRE_Complex *A_data       = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i          = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j          = hypre_CSRMatrixJ(A);
   HYPRE_Int      num_rows     = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      num_cols     = hypre_CSRMatrixNumCols(A);
   HYPRE_Int      num_nonzeros = hypre_CSRMatrixNumNonzeros(A);

   hypre_CSRMatrix *B;
   HYPRE_Complex   *B_data;
   HYPRE_Int       *B_i;
   HYPRE_Int       *B_j;
   HYPRE_Int        num_small, row, k, dst;

   /* First sweep: count how many entries fall below the drop tolerance */
   num_small = 0;
   for (k = 0; k < num_nonzeros; k++)
   {
      if (hypre_cabs(A_data[k]) <= tol)
      {
         num_small++;
      }
   }

   if (!num_small)
   {
      /* Nothing to delete; tell the caller no new matrix was built */
      return NULL;
   }

   /* Second sweep: compact the surviving entries into B */
   B = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros - num_small);
   hypre_CSRMatrixInitialize(B);
   B_i    = hypre_CSRMatrixI(B);
   B_j    = hypre_CSRMatrixJ(B);
   B_data = hypre_CSRMatrixData(B);

   B_i[0] = 0;
   dst = 0;
   for (row = 0; row < num_rows; row++)
   {
      for (k = A_i[row]; k < A_i[row+1]; k++)
      {
         if (hypre_cabs(A_data[k]) > tol)
         {
            B_data[dst] = A_data[k];
            B_j[dst]    = A_j[k];
            dst++;
         }
      }
      B_i[row+1] = dst;
   }

   return B;
}
/******************************************************************************
*
* Finds transpose of a hypre_CSRMatrix
*
*****************************************************************************/
/**
 * Maps a flat index of a dim1-major 2D layout to the flat index of the
 * transposed (dim2-major) layout:
 *   idx = idx2*dim1 + idx1  ->  ret = idx1*dim2 + idx2
 */
static inline HYPRE_Int
transpose_idx (HYPRE_Int idx, HYPRE_Int dim1, HYPRE_Int dim2)
{
   HYPRE_Int idx1 = idx % dim1;
   HYPRE_Int idx2 = idx / dim1;

   return idx1 * dim2 + idx2;
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixTransposeHost
 *
 * Host implementation of AT = transpose(A), using a thread-parallel
 * counting sort over the column indices of A.  When "data" is nonzero
 * the numerical values are transposed as well; otherwise only the
 * sparsity pattern (I and J arrays) of AT is produced.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixTransposeHost(hypre_CSRMatrix *A,
                             hypre_CSRMatrix **AT,
                             HYPRE_Int data)
{
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A);
   HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Int num_rows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_cols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Int num_nnzs_A = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A);
   HYPRE_Complex *AT_data;
   HYPRE_Int *AT_j;
   HYPRE_Int num_rows_AT;
   HYPRE_Int num_cols_AT;
   HYPRE_Int num_nnzs_AT;
   HYPRE_Int max_col;
   HYPRE_Int i, j;

   /*--------------------------------------------------------------
    * First, ascertain that num_cols and num_nonzeros has been set.
    * If not, set them.
    *--------------------------------------------------------------*/
   HYPRE_ANNOTATE_FUNC_BEGIN;

   if (!num_nnzs_A)
   {
      num_nnzs_A = A_i[num_rows_A];
   }

   if (num_rows_A && num_nnzs_A && ! num_cols_A)
   {
      /* num_cols was never set: derive it from the largest column index */
      max_col = -1;
      for (i = 0; i < num_rows_A; ++i)
      {
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            if (A_j[j] > max_col)
            {
               max_col = A_j[j];
            }
         }
      }
      num_cols_A = max_col + 1;
   }

   num_rows_AT = num_cols_A;
   num_cols_AT = num_rows_A;
   num_nnzs_AT = num_nnzs_A;

   *AT = hypre_CSRMatrixCreate(num_rows_AT, num_cols_AT, num_nnzs_AT);
   hypre_CSRMatrixMemoryLocation(*AT) = memory_location;

   if (num_cols_A == 0)
   {
      // JSP: parallel counting sorting breaks down
      // when A has no columns
      hypre_CSRMatrixInitialize(*AT);
      HYPRE_ANNOTATE_FUNC_END;
      return hypre_error_flag;
   }

   AT_j = hypre_CTAlloc(HYPRE_Int, num_nnzs_AT, memory_location);
   hypre_CSRMatrixJ(*AT) = AT_j;
   if (data)
   {
      AT_data = hypre_CTAlloc(HYPRE_Complex, num_nnzs_AT, memory_location);
      hypre_CSRMatrixData(*AT) = AT_data;
   }

   /*-----------------------------------------------------------------
    * Parallel count sort
    *-----------------------------------------------------------------*/
   HYPRE_Int *bucket = hypre_CTAlloc(HYPRE_Int, (num_cols_A + 1)*hypre_NumThreads(),
                                     HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int ii, num_threads, ns, ne;
      HYPRE_Int i, j, j0, j1, ir;
      HYPRE_Int idx, offset;
      HYPRE_Int transpose_i;
      HYPRE_Int transpose_i_minus_1;
      HYPRE_Int transpose_i0;
      HYPRE_Int transpose_j0;
      HYPRE_Int transpose_j1;

      /* Each thread owns a contiguous range [ns, ne) of (nonzero) rows */
      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne);

      /*-----------------------------------------------------------------
       * Count the number of entries that will go into each bucket
       * bucket is used as HYPRE_Int[num_threads][num_colsA] 2D array
       *-----------------------------------------------------------------*/
      if (rownnz_A == NULL)
      {
         for (j = A_i[ns]; j < A_i[ne]; ++j)
         {
            bucket[ii*num_cols_A + A_j[j]]++;
         }
      }
      else
      {
         /* With a rownnz list only the registered nonempty rows are read */
         for (i = ns; i < ne; i++)
         {
            ir = rownnz_A[i];
            for (j = A_i[ir]; j < A_i[ir+1]; ++j)
            {
               bucket[ii*num_cols_A + A_j[j]]++;
            }
         }
      }

      /*-----------------------------------------------------------------
       * Parallel prefix sum of bucket with length num_colsA * num_threads
       * accessed as if it is transposed as HYPRE_Int[num_colsA][num_threads]
       *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Per-thread stripe: local prefix sums in the transposed view */
      for (i = ii*num_cols_A + 1; i < (ii + 1)*num_cols_A; ++i)
      {
         transpose_i = transpose_idx(i, num_threads, num_cols_A);
         transpose_i_minus_1 = transpose_idx(i - 1, num_threads, num_cols_A);

         bucket[transpose_i] += bucket[transpose_i_minus_1];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#pragma omp master
#endif
      {
         /* Master thread serially carries stripe totals across threads */
         for (i = 1; i < num_threads; ++i)
         {
            j0 = num_cols_A*i - 1;
            j1 = num_cols_A*(i + 1) - 1;
            transpose_j0 = transpose_idx(j0, num_threads, num_cols_A);
            transpose_j1 = transpose_idx(j1, num_threads, num_cols_A);

            bucket[transpose_j1] += bucket[transpose_j0];
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (ii > 0)
      {
         /* Propagate the carried offset to the rest of this stripe
          * (the stripe's last element was already fixed by the master) */
         transpose_i0 = transpose_idx(num_cols_A*ii - 1, num_threads, num_cols_A);
         offset = bucket[transpose_i0];

         for (i = ii*num_cols_A; i < (ii + 1)*num_cols_A - 1; ++i)
         {
            transpose_i = transpose_idx(i, num_threads, num_cols_A);

            bucket[transpose_i] += offset;
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /*----------------------------------------------------------------
       * Load the data and column numbers of AT
       * (walk backwards so that decrementing bucket ends exactly at the
       * start offset of each AT row)
       *----------------------------------------------------------------*/
      if (data)
      {
         for (i = ne - 1; i >= ns; --i)
         {
            ir = rownnz_A ? rownnz_A[i] : i;
            for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
            {
               idx = A_j[j];
               --bucket[ii*num_cols_A + idx];

               offset = bucket[ii*num_cols_A + idx];
               AT_data[offset] = A_data[j];
               AT_j[offset] = ir;
            }
         }
      }
      else
      {
         /* Pattern-only variant: identical walk, values not copied */
         for (i = ne - 1; i >= ns; --i)
         {
            ir = rownnz_A ? rownnz_A[i] : i;
            for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
            {
               idx = A_j[j];
               --bucket[ii*num_cols_A + idx];

               offset = bucket[ii*num_cols_A + idx];
               AT_j[offset] = ir;
            }
         }
      }
   } /* end parallel region */

   /* The leading num_cols_A entries of bucket now hold AT's row starts;
    * copy them into AT's I array and pin the sentinel to the nnz count */
   hypre_CSRMatrixI(*AT) = hypre_TAlloc(HYPRE_Int, num_cols_A + 1, memory_location);
   hypre_TMemcpy(hypre_CSRMatrixI(*AT), bucket, HYPRE_Int, num_cols_A + 1, memory_location, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixI(*AT)[num_cols_A] = num_nnzs_A;
   hypre_TFree(bucket, HYPRE_MEMORY_HOST);

   /* Move diagonal to first entry (for square matrices only)*/
   if(num_rows_A == num_cols_A)
   {
      hypre_CSRMatrixReorder(*AT);
   }

   // Set rownnz and num_rownnz
   if (hypre_CSRMatrixNumRownnz(A) < num_rows_A)
   {
      hypre_CSRMatrixSetRownnz(*AT);
   }

   HYPRE_ANNOTATE_FUNC_END;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixTranspose
 *
 * Front end for the transpose: picks the device or host implementation
 * based on where A's data lives.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixTranspose(hypre_CSRMatrix *A,
                         hypre_CSRMatrix **AT,
                         HYPRE_Int data)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE)
   {
      return hypre_CSRMatrixTransposeDevice(A, AT, data);
   }
#endif

   return hypre_CSRMatrixTransposeHost(A, AT, data);
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixSplit
 *
 * Splits the external rows Bs_ext (columns in global numbering) into a
 * "diag" part, whose columns fall in [first_col_diag_B, last_col_diag_B]
 * and are shifted to local numbering, and an "offd" part holding all
 * remaining columns.  The offd columns are compressed: col_map_offd_C
 * maps the new local offd indices back to global column numbers.
 *--------------------------------------------------------------------------*/
/* RL: TODO add memory locations */
HYPRE_Int hypre_CSRMatrixSplit(hypre_CSRMatrix *Bs_ext,
                               HYPRE_BigInt first_col_diag_B,
                               HYPRE_BigInt last_col_diag_B,
                               HYPRE_Int num_cols_offd_B,
                               HYPRE_BigInt *col_map_offd_B,
                               HYPRE_Int *num_cols_offd_C_ptr,
                               HYPRE_BigInt **col_map_offd_C_ptr,
                               hypre_CSRMatrix **Bext_diag_ptr,
                               hypre_CSRMatrix **Bext_offd_ptr)
{
   HYPRE_Complex *Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
   HYPRE_Int *Bs_ext_i = hypre_CSRMatrixI(Bs_ext);
   HYPRE_BigInt *Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext);
   HYPRE_Int num_rows_Bext = hypre_CSRMatrixNumRows(Bs_ext);
   HYPRE_Int B_ext_diag_size = 0;
   HYPRE_Int B_ext_offd_size = 0;
   HYPRE_Int *B_ext_diag_i = NULL;
   HYPRE_Int *B_ext_diag_j = NULL;
   HYPRE_Complex *B_ext_diag_data = NULL;
   HYPRE_Int *B_ext_offd_i = NULL;
   HYPRE_Int *B_ext_offd_j = NULL;
   HYPRE_Complex *B_ext_offd_data = NULL;
   HYPRE_Int *my_diag_array;
   HYPRE_Int *my_offd_array;
   HYPRE_BigInt *temp;
   HYPRE_Int max_num_threads;
   HYPRE_Int cnt = 0;
   hypre_CSRMatrix *Bext_diag = NULL;
   hypre_CSRMatrix *Bext_offd = NULL;
   HYPRE_BigInt *col_map_offd_C = NULL;
   HYPRE_Int num_cols_offd_C = 0;

   B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);
   B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);

   /* Per-thread entry counts feeding the prefix sums below */
   max_num_threads = hypre_NumThreads();
   my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
   my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int ns, ne, ii, num_threads;
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;

      /* Each thread owns the contiguous row range [ns, ne) */
      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      hypre_partition1D(num_rows_Bext, num_threads, ii, &ns, &ne);

      /* First pass: count diag/offd entries per row; the row pointer
       * arrays temporarily hold thread-local offsets */
      my_diag_size = 0;
      my_offd_size = 0;
      for (i=ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               my_offd_size++;
            }
            else
            {
               my_diag_size++;
            }
         }
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii)
      {
         /* Shift this thread's row pointers by the totals of all
          * lower-numbered threads */
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         /* Thread 0 finalizes the global sizes and allocates the arrays
          * (other threads wait at the barrier below before using them) */
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_rows_Bext] = B_ext_diag_size;
         B_ext_offd_i[num_rows_Bext] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size || num_cols_offd_B)
         {
            temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Second pass: scatter entries into the diag/offd arrays */
      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               /* NOTE(review): the global (BigInt) column is stored into an
                * HYPRE_Int slot here before being remapped to a local index
                * below - verify this is safe for mixed-int builds */
               temp[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               /* Diag column: shift to local numbering */
               B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B;
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
         }
      }

      /* This computes the mappings */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii == 0)
      {
         /* Thread 0 merges the collected offd columns with col_map_offd_B,
          * sorts, and deduplicates to build col_map_offd_C */
         cnt = 0;
         if (B_ext_offd_size || num_cols_offd_B)
         {
            cnt = B_ext_offd_size;
            for (i=0; i < num_cols_offd_B; i++)
            {
               temp[cnt++] = col_map_offd_B[i];
            }

            if (cnt)
            {
               hypre_BigQsort0(temp, 0, cnt-1);
               num_cols_offd_C = 1;
               HYPRE_BigInt value = temp[0];
               /* Compact unique values in place */
               for (i = 1; i < cnt; i++)
               {
                  if (temp[i] > value)
                  {
                     value = temp[i];
                     temp[num_cols_offd_C++] = value;
                  }
               }
            }

            if (num_cols_offd_C)
            {
               col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
            }

            for (i = 0; i < num_cols_offd_C; i++)
            {
               col_map_offd_C[i] = temp[i];
            }
            hypre_TFree(temp, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Renumber offd columns to local indices via col_map_offd_C */
      for (i = ns; i < ne; i++)
      {
         for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
         {
            B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_ext_offd_j[j], num_cols_offd_C);
         }
      }
   } /* end parallel region */

   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   /* Wrap the assembled arrays into host CSR matrices */
   Bext_diag = hypre_CSRMatrixCreate(num_rows_Bext, last_col_diag_B-first_col_diag_B+1, B_ext_diag_size);
   hypre_CSRMatrixMemoryLocation(Bext_diag) = HYPRE_MEMORY_HOST;
   Bext_offd = hypre_CSRMatrixCreate(num_rows_Bext, num_cols_offd_C, B_ext_offd_size);
   hypre_CSRMatrixMemoryLocation(Bext_offd) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(Bext_diag) = B_ext_diag_i;
   hypre_CSRMatrixJ(Bext_diag) = B_ext_diag_j;
   hypre_CSRMatrixData(Bext_diag) = B_ext_diag_data;
   hypre_CSRMatrixI(Bext_offd) = B_ext_offd_i;
   hypre_CSRMatrixJ(Bext_offd) = B_ext_offd_j;
   hypre_CSRMatrixData(Bext_offd) = B_ext_offd_data;

   *col_map_offd_C_ptr = col_map_offd_C;
   *Bext_diag_ptr = Bext_diag;
   *Bext_offd_ptr = Bext_offd;
   *num_cols_offd_C_ptr = num_cols_offd_C;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixReorder:
 * Reorders the column and data arrays of a square CSR matrix so that the
 * first entry of each row is its diagonal entry (when one is stored).
 * Returns -1 for non-square matrices.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixReorder(hypre_CSRMatrix *A)
{
   HYPRE_Complex *A_data     = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i        = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j        = hypre_CSRMatrixJ(A);
   HYPRE_Int     *rownnz_A   = hypre_CSRMatrixRownnz(A);
   HYPRE_Int      nnzrows_A  = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Int      num_rows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      num_cols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Int      i, ii, j;

   /* Diagonal reordering only makes sense for square matrices */
   if (num_rows_A != num_cols_A)
   {
      return -1;
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, ii, j) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nnzrows_A; i++)
   {
      /* With a rownnz list only the nonempty rows are visited */
      ii = rownnz_A ? rownnz_A[i] : i;
      for (j = A_i[ii]; j < A_i[ii+1]; j++)
      {
         if (A_j[j] != ii)
         {
            continue;
         }
         /* Found the diagonal entry; swap it to the head of the row */
         if (j != A_i[ii])
         {
            hypre_swap(A_j, A_i[ii], j);
            hypre_swap_c(A_data, A_i[ii], j);
         }
         break;
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixAddPartial:
 * adds matrix rows in the CSR matrix B to the CSR Matrix A, where row_nums[i]
 * defines to which row of A the i-th row of B is added, and returns a CSR Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 * through cancellation of elements in A and B or already contained
 * in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
 *--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixAddPartial( hypre_CSRMatrix *A,
                           hypre_CSRMatrix *B,
                           HYPRE_Int *row_nums)
{
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
   HYPRE_Int *B_i = hypre_CSRMatrixI(B);
   HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
   HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B);
   HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix *C;
   HYPRE_Complex *C_data;
   HYPRE_Int *C_i;
   HYPRE_Int *C_j;
   HYPRE_Int ia, ib, ic, jcol, num_nonzeros;
   HYPRE_Int pos, i, i2, j, cnt;
   HYPRE_Int *marker;   /* per-column scratch; meaning differs per pass */
   HYPRE_Int *map;      /* sorted order -> original row index of B */
   HYPRE_Int *temp;     /* row_nums sorted ascending */
   HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
      hypre_assert(memory_location_A == memory_location_B);
   */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   if (ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* Sort the target row numbers; map[] remembers which row of B each
    * sorted entry came from */
   map = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   temp = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   for (i=0; i < nrows_B; i++)
   {
      map[i] = i;
      temp[i] = row_nums[i];
   }

   hypre_qsort2i(temp,map,0,nrows_B-1);

   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C);

   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   /* First pass: count the nonzeros of each C row - the union of A's row
    * and the columns of every B row mapped onto it.  In this pass
    * marker[jcol] records the last row that saw column jcol. */
   num_nonzeros = 0;
   C_i[0] = 0;
   cnt = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      if (cnt < nrows_B && temp[cnt] == ic)
      {
         /* Consume the (sorted) run of B rows whose target row is ic */
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] != ic)
                  {
                     marker[jcol] = ic;
                     num_nonzeros++;
                  }
               }
            }
            else
            {
               break;
            }
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
   C_j = hypre_CSRMatrixJ(C);
   C_data = hypre_CSRMatrixData(C);

   /* Second pass: fill C.  Now marker[jcol] holds the position of column
    * jcol inside the current row of C (entries below C_i[ic] are stale). */
   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   cnt = 0;
   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }

      if (cnt < nrows_B && temp[cnt] == ic)
      {
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] < C_i[ic])
                  {
                     /* New column for this row: append */
                     C_j[pos] = jcol;
                     C_data[pos] = B_data[ib];
                     marker[jcol] = pos;
                     pos++;
                  }
                  else
                  {
                     /* Column already present in this row: accumulate */
                     C_data[marker[jcol]] += B_data[ib];
                  }
               }
            }
            else
            {
               break;
            }
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);
   hypre_TFree(map, HYPRE_MEMORY_HOST);
   hypre_TFree(temp, HYPRE_MEMORY_HOST);

   return C;
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixSumElts:
 * Returns the sum of all matrix elements.
 *--------------------------------------------------------------------------*/
HYPRE_Complex
hypre_CSRMatrixSumElts( hypre_CSRMatrix *A )
{
   HYPRE_Complex *data = hypre_CSRMatrixData(A);
   HYPRE_Int      nnz  = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Complex  sum  = 0;
   HYPRE_Int      i;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nnz; i++)
   {
      sum += data[i];
   }

   return sum;
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixFnorm
 *
 * Returns the Frobenius norm of A: sqrt of the sum of squared entries.
 *--------------------------------------------------------------------------*/
HYPRE_Real
hypre_CSRMatrixFnorm( hypre_CSRMatrix *A )
{
   HYPRE_Int      nrows  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      nnz    = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Int     *A_i    = hypre_CSRMatrixI(A);
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Complex  sum    = 0;
   HYPRE_Int      k;

   /* The cached nonzero count must agree with the row-pointer array */
   hypre_assert(nnz == A_i[nrows]);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < nnz; ++k)
   {
      sum += A_data[k] * A_data[k];
   }

   return sqrt(sum);
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixComputeRowSumHost
 *
 * type == 0, sum,
 *         1, abs sum
 *         2, square sum
 *--------------------------------------------------------------------------*/
void
hypre_CSRMatrixComputeRowSumHost( hypre_CSRMatrix *A,
                                  HYPRE_Int *CF_i,
                                  HYPRE_Int *CF_j,
                                  HYPRE_Complex *row_sum,
                                  HYPRE_Int type,
                                  HYPRE_Complex scal,
                                  const char *set_or_add)
{
   HYPRE_Int      nrows  = hypre_CSRMatrixNumRows(A);
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i    = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j    = hypre_CSRMatrixJ(A);
   HYPRE_Int      i, j;

   for (i = 0; i < nrows; i++)
   {
      /* "set" starts from zero; anything else accumulates into row_sum */
      HYPRE_Complex accum = (set_or_add[0] == 's') ? 0.0 : row_sum[i];

      for (j = A_i[i]; j < A_i[i+1]; j++)
      {
         /* When C/F markers are supplied, skip entries crossing the split */
         if (CF_i && CF_j && CF_i[i] != CF_j[A_j[j]])
         {
            continue;
         }

         switch (type)
         {
            case 0:
               accum += scal * A_data[j];
               break;
            case 1:
               accum += scal * fabs(A_data[j]);
               break;
            case 2:
               accum += scal * A_data[j] * A_data[j];
               break;
         }
      }

      row_sum[i] = accum;
   }
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixComputeRowSum
 *
 * Dispatches the row-sum computation to the device or host kernel,
 * depending on where A's data lives.
 *--------------------------------------------------------------------------*/
void
hypre_CSRMatrixComputeRowSum( hypre_CSRMatrix *A,
                              HYPRE_Int *CF_i,
                              HYPRE_Int *CF_j,
                              HYPRE_Complex *row_sum,
                              HYPRE_Int type,
                              HYPRE_Complex scal,
                              const char *set_or_add)
{
   /* CF_i and CF_j must be supplied together, or both omitted */
   hypre_assert( (CF_i && CF_j) || (!CF_i && !CF_j) );

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE)
   {
      hypre_CSRMatrixComputeRowSumDevice(A, CF_i, CF_j, row_sum, type, scal, set_or_add);
      return;
   }
#endif

   hypre_CSRMatrixComputeRowSumHost(A, CF_i, CF_j, row_sum, type, scal, set_or_add);
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixExtractDiagonalHost
 *--------------------------------------------------------------------------*/
void
hypre_CSRMatrixExtractDiagonalHost( hypre_CSRMatrix *A,
                                    HYPRE_Complex *d,
                                    HYPRE_Int type)
{
   HYPRE_Int      nrows  = hypre_CSRMatrixNumRows(A);
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i    = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j    = hypre_CSRMatrixJ(A);
   HYPRE_Int      row, k;

   for (row = 0; row < nrows; row++)
   {
      /* Defaults to zero when the row stores no diagonal entry */
      HYPRE_Complex diag = 0.0;

      for (k = A_i[row]; k < A_i[row+1]; k++)
      {
         if (A_j[k] != row)
         {
            continue;
         }
         if (type == 0)
         {
            diag = A_data[k];
         }
         else if (type == 1)
         {
            diag = fabs(A_data[k]);
         }
         break;
      }

      d[row] = diag;
   }
}
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixExtractDiagonal
 *
 * type 0: diag
 *      1: abs diag
 *
 * Dispatches to the device or host kernel based on A's memory location.
 *--------------------------------------------------------------------------*/
void
hypre_CSRMatrixExtractDiagonal( hypre_CSRMatrix *A,
                                HYPRE_Complex *d,
                                HYPRE_Int type)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE)
   {
      hypre_CSRMatrixExtractDiagonalDevice(A, d, type);
      return;
   }
#endif

   hypre_CSRMatrixExtractDiagonalHost(A, d, type);
}
|
z_solve.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB BT code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
#include "work_lhs.h"
#include "timers.h"
//---------------------------------------------------------------------
// Performs line solves in Z direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void z_solve()
{
// printf("zzzzzzzzz\n");
int i, j, k, m, n, ksize;
//kai
// int k14;
//consistent_data(&k14, "int", 1);
//---------------------------------------------------------------------
//---------------------------------------------------------------------
if (timeron) timer_start(t_zsolve);
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side for the three z-factors
//---------------------------------------------------------------------
ksize = grid_points[2]-1;
//---------------------------------------------------------------------
// Compute the indices for storing the block-diagonal matrix;
// determine c (labeled f) and s jacobians
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) shared(ksize) private(i,j,k,m,n)
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 0; k <= ksize; k++) {
tmp1 = 1.0 / u[k][j][i][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[k][0][0] = 0.0;
fjac[k][1][0] = 0.0;
fjac[k][2][0] = 0.0;
fjac[k][3][0] = 1.0;
fjac[k][4][0] = 0.0;
fjac[k][0][1] = - ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;
fjac[k][1][1] = u[k][j][i][3] * tmp1;
fjac[k][2][1] = 0.0;
fjac[k][3][1] = u[k][j][i][1] * tmp1;
fjac[k][4][1] = 0.0;
fjac[k][0][2] = - ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;
fjac[k][1][2] = 0.0;
fjac[k][2][2] = u[k][j][i][3] * tmp1;
fjac[k][3][2] = u[k][j][i][2] * tmp1;
fjac[k][4][2] = 0.0;
fjac[k][0][3] = - (u[k][j][i][3]*u[k][j][i][3] * tmp2 )
+ c2 * qs[k][j][i];
fjac[k][1][3] = - c2 * u[k][j][i][1] * tmp1;
fjac[k][2][3] = - c2 * u[k][j][i][2] * tmp1;
fjac[k][3][3] = ( 2.0 - c2 ) * u[k][j][i][3] * tmp1;
fjac[k][4][3] = c2;
fjac[k][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* u[k][j][i][3] * tmp2;
fjac[k][1][4] = - c2 * ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;
fjac[k][2][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;
fjac[k][3][4] = c1 * ( u[k][j][i][4] * tmp1 )
- c2 * ( qs[k][j][i] + u[k][j][i][3]*u[k][j][i][3] * tmp2 );
fjac[k][4][4] = c1 * u[k][j][i][3] * tmp1;
njac[k][0][0] = 0.0;
njac[k][1][0] = 0.0;
njac[k][2][0] = 0.0;
njac[k][3][0] = 0.0;
njac[k][4][0] = 0.0;
njac[k][0][1] = - c3c4 * tmp2 * u[k][j][i][1];
njac[k][1][1] = c3c4 * tmp1;
njac[k][2][1] = 0.0;
njac[k][3][1] = 0.0;
njac[k][4][1] = 0.0;
njac[k][0][2] = - c3c4 * tmp2 * u[k][j][i][2];
njac[k][1][2] = 0.0;
njac[k][2][2] = c3c4 * tmp1;
njac[k][3][2] = 0.0;
njac[k][4][2] = 0.0;
njac[k][0][3] = - con43 * c3c4 * tmp2 * u[k][j][i][3];
njac[k][1][3] = 0.0;
njac[k][2][3] = 0.0;
njac[k][3][3] = con43 * c3 * c4 * tmp1;
njac[k][4][3] = 0.0;
njac[k][0][4] = - ( c3c4
- c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])
- ( con43 * c3c4
- c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])
- c1345 * tmp2 * u[k][j][i][4];
njac[k][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];
njac[k][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
njac[k][3][4] = ( con43 * c3c4
- c1345 ) * tmp2 * u[k][j][i][3];
njac[k][4][4] = ( c1345 )* tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in z direction
//---------------------------------------------------------------------
lhsinit(lhs, ksize);
for (k = 1; k <= ksize-1; k++) {
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[k][AA][0][0] = - tmp2 * fjac[k-1][0][0]
- tmp1 * njac[k-1][0][0]
- tmp1 * dz1;
lhs[k][AA][1][0] = - tmp2 * fjac[k-1][1][0]
- tmp1 * njac[k-1][1][0];
lhs[k][AA][2][0] = - tmp2 * fjac[k-1][2][0]
- tmp1 * njac[k-1][2][0];
lhs[k][AA][3][0] = - tmp2 * fjac[k-1][3][0]
- tmp1 * njac[k-1][3][0];
lhs[k][AA][4][0] = - tmp2 * fjac[k-1][4][0]
- tmp1 * njac[k-1][4][0];
lhs[k][AA][0][1] = - tmp2 * fjac[k-1][0][1]
- tmp1 * njac[k-1][0][1];
lhs[k][AA][1][1] = - tmp2 * fjac[k-1][1][1]
- tmp1 * njac[k-1][1][1]
- tmp1 * dz2;
lhs[k][AA][2][1] = - tmp2 * fjac[k-1][2][1]
- tmp1 * njac[k-1][2][1];
lhs[k][AA][3][1] = - tmp2 * fjac[k-1][3][1]
- tmp1 * njac[k-1][3][1];
lhs[k][AA][4][1] = - tmp2 * fjac[k-1][4][1]
- tmp1 * njac[k-1][4][1];
lhs[k][AA][0][2] = - tmp2 * fjac[k-1][0][2]
- tmp1 * njac[k-1][0][2];
lhs[k][AA][1][2] = - tmp2 * fjac[k-1][1][2]
- tmp1 * njac[k-1][1][2];
lhs[k][AA][2][2] = - tmp2 * fjac[k-1][2][2]
- tmp1 * njac[k-1][2][2]
- tmp1 * dz3;
lhs[k][AA][3][2] = - tmp2 * fjac[k-1][3][2]
- tmp1 * njac[k-1][3][2];
lhs[k][AA][4][2] = - tmp2 * fjac[k-1][4][2]
- tmp1 * njac[k-1][4][2];
lhs[k][AA][0][3] = - tmp2 * fjac[k-1][0][3]
- tmp1 * njac[k-1][0][3];
lhs[k][AA][1][3] = - tmp2 * fjac[k-1][1][3]
- tmp1 * njac[k-1][1][3];
lhs[k][AA][2][3] = - tmp2 * fjac[k-1][2][3]
- tmp1 * njac[k-1][2][3];
lhs[k][AA][3][3] = - tmp2 * fjac[k-1][3][3]
- tmp1 * njac[k-1][3][3]
- tmp1 * dz4;
lhs[k][AA][4][3] = - tmp2 * fjac[k-1][4][3]
- tmp1 * njac[k-1][4][3];
lhs[k][AA][0][4] = - tmp2 * fjac[k-1][0][4]
- tmp1 * njac[k-1][0][4];
lhs[k][AA][1][4] = - tmp2 * fjac[k-1][1][4]
- tmp1 * njac[k-1][1][4];
lhs[k][AA][2][4] = - tmp2 * fjac[k-1][2][4]
- tmp1 * njac[k-1][2][4];
lhs[k][AA][3][4] = - tmp2 * fjac[k-1][3][4]
- tmp1 * njac[k-1][3][4];
lhs[k][AA][4][4] = - tmp2 * fjac[k-1][4][4]
- tmp1 * njac[k-1][4][4]
- tmp1 * dz5;
lhs[k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[k][BB][1][0] = tmp1 * 2.0 * njac[k][1][0];
lhs[k][BB][2][0] = tmp1 * 2.0 * njac[k][2][0];
lhs[k][BB][3][0] = tmp1 * 2.0 * njac[k][3][0];
lhs[k][BB][4][0] = tmp1 * 2.0 * njac[k][4][0];
lhs[k][BB][0][1] = tmp1 * 2.0 * njac[k][0][1];
lhs[k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[k][BB][2][1] = tmp1 * 2.0 * njac[k][2][1];
lhs[k][BB][3][1] = tmp1 * 2.0 * njac[k][3][1];
lhs[k][BB][4][1] = tmp1 * 2.0 * njac[k][4][1];
lhs[k][BB][0][2] = tmp1 * 2.0 * njac[k][0][2];
lhs[k][BB][1][2] = tmp1 * 2.0 * njac[k][1][2];
lhs[k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[k][BB][3][2] = tmp1 * 2.0 * njac[k][3][2];
lhs[k][BB][4][2] = tmp1 * 2.0 * njac[k][4][2];
lhs[k][BB][0][3] = tmp1 * 2.0 * njac[k][0][3];
lhs[k][BB][1][3] = tmp1 * 2.0 * njac[k][1][3];
lhs[k][BB][2][3] = tmp1 * 2.0 * njac[k][2][3];
lhs[k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[k][BB][4][3] = tmp1 * 2.0 * njac[k][4][3];
lhs[k][BB][0][4] = tmp1 * 2.0 * njac[k][0][4];
lhs[k][BB][1][4] = tmp1 * 2.0 * njac[k][1][4];
lhs[k][BB][2][4] = tmp1 * 2.0 * njac[k][2][4];
lhs[k][BB][3][4] = tmp1 * 2.0 * njac[k][3][4];
lhs[k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[k][4][4]
+ tmp1 * 2.0 * dz5;
lhs[k][CC][0][0] = tmp2 * fjac[k+1][0][0]
- tmp1 * njac[k+1][0][0]
- tmp1 * dz1;
lhs[k][CC][1][0] = tmp2 * fjac[k+1][1][0]
- tmp1 * njac[k+1][1][0];
lhs[k][CC][2][0] = tmp2 * fjac[k+1][2][0]
- tmp1 * njac[k+1][2][0];
lhs[k][CC][3][0] = tmp2 * fjac[k+1][3][0]
- tmp1 * njac[k+1][3][0];
lhs[k][CC][4][0] = tmp2 * fjac[k+1][4][0]
- tmp1 * njac[k+1][4][0];
lhs[k][CC][0][1] = tmp2 * fjac[k+1][0][1]
- tmp1 * njac[k+1][0][1];
lhs[k][CC][1][1] = tmp2 * fjac[k+1][1][1]
- tmp1 * njac[k+1][1][1]
- tmp1 * dz2;
lhs[k][CC][2][1] = tmp2 * fjac[k+1][2][1]
- tmp1 * njac[k+1][2][1];
lhs[k][CC][3][1] = tmp2 * fjac[k+1][3][1]
- tmp1 * njac[k+1][3][1];
lhs[k][CC][4][1] = tmp2 * fjac[k+1][4][1]
- tmp1 * njac[k+1][4][1];
lhs[k][CC][0][2] = tmp2 * fjac[k+1][0][2]
- tmp1 * njac[k+1][0][2];
lhs[k][CC][1][2] = tmp2 * fjac[k+1][1][2]
- tmp1 * njac[k+1][1][2];
lhs[k][CC][2][2] = tmp2 * fjac[k+1][2][2]
- tmp1 * njac[k+1][2][2]
- tmp1 * dz3;
lhs[k][CC][3][2] = tmp2 * fjac[k+1][3][2]
- tmp1 * njac[k+1][3][2];
lhs[k][CC][4][2] = tmp2 * fjac[k+1][4][2]
- tmp1 * njac[k+1][4][2];
lhs[k][CC][0][3] = tmp2 * fjac[k+1][0][3]
- tmp1 * njac[k+1][0][3];
lhs[k][CC][1][3] = tmp2 * fjac[k+1][1][3]
- tmp1 * njac[k+1][1][3];
lhs[k][CC][2][3] = tmp2 * fjac[k+1][2][3]
- tmp1 * njac[k+1][2][3];
lhs[k][CC][3][3] = tmp2 * fjac[k+1][3][3]
- tmp1 * njac[k+1][3][3]
- tmp1 * dz4;
lhs[k][CC][4][3] = tmp2 * fjac[k+1][4][3]
- tmp1 * njac[k+1][4][3];
lhs[k][CC][0][4] = tmp2 * fjac[k+1][0][4]
- tmp1 * njac[k+1][0][4];
lhs[k][CC][1][4] = tmp2 * fjac[k+1][1][4]
- tmp1 * njac[k+1][1][4];
lhs[k][CC][2][4] = tmp2 * fjac[k+1][2][4]
- tmp1 * njac[k+1][2][4];
lhs[k][CC][3][4] = tmp2 * fjac[k+1][3][4]
- tmp1 * njac[k+1][3][4];
lhs[k][CC][4][4] = tmp2 * fjac[k+1][4][4]
- tmp1 * njac[k+1][4][4]
- tmp1 * dz5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs guaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(KMAX) and rhs'(KMAX) will be sent to next cell.
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[0][j][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs( lhs[0][BB], lhs[0][CC], rhs[0][j][i] );
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
for (k = 1; k <= ksize-1; k++) {
//-------------------------------------------------------------------
// subtract A*lhs_vector(k-1) from lhs_vector(k)
//
// rhs(k) = rhs(k) - A*rhs(k-1)
//-------------------------------------------------------------------
matvec_sub(lhs[k][AA], rhs[k-1][j][i], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(k) = B(k) - C(k-1)*A(k)
// matmul_sub(AA,i,j,k,c,CC,i,j,k-1,c,BB,i,j,k)
//-------------------------------------------------------------------
matmul_sub(lhs[k][AA], lhs[k-1][CC], lhs[k][BB]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[0][j][i] by b_inverse[0][j][i] and copy to rhs
//-------------------------------------------------------------------
binvcrhs( lhs[k][BB], lhs[k][CC], rhs[k][j][i] );
}
//---------------------------------------------------------------------
// Now finish up special cases for last cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[ksize][AA], rhs[ksize-1][j][i], rhs[ksize][j][i]);
//---------------------------------------------------------------------
// B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
// matmul_sub(AA,i,j,ksize,c,
// $ CC,i,j,ksize-1,c,BB,i,j,ksize)
//---------------------------------------------------------------------
matmul_sub(lhs[ksize][AA], lhs[ksize-1][CC], lhs[ksize][BB]);
//---------------------------------------------------------------------
// multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
//---------------------------------------------------------------------
binvrhs( lhs[ksize][BB], rhs[ksize][j][i] );
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(ksize)=rhs(ksize)
// else assume U(ksize) is loaded in un pack backsub_info
// so just use it
// after u(kstart) will be sent to next cell
//---------------------------------------------------------------------
for (k = ksize-1; k >= 0; k--) {
for (m = 0; m < BLOCK_SIZE; m++) {
for (n = 0; n < BLOCK_SIZE; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhs[k][CC][n][m]*rhs[k+1][j][i][n];
}
}
}
}
//kai
k14 = j;
// printf("k14=%p\n",&k14);
}
if (timeron) timer_stop(t_zsolve);
}
|
rnn_helpers.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#ifdef _WIN32
#pragma warning(disable : 4267)
#endif
#include <algorithm>
#include <functional>
#include <future>
#include <string>
#include <vector>
#include "gsl/gsl"
#include "core/common/common.h"
#include "core/common/logging/logging.h"
#include "core/framework/allocator.h"
#include "core/util/math.h"
#include "core/util/math_cpuonly.h"
#include "core/platform/threadpool.h"
namespace onnxruntime {
class Tensor;
class OpKernelContext;
namespace rnn {
namespace detail {
// Direction an RNN processes the sequence axis in; values mirror the
// ONNX 'direction' attribute ("forward" / "reverse" / "bidirectional").
enum Direction {
kForward = 0,
kReverse = 1,
kBidirectional = 2
};
// Converts the ONNX 'direction' attribute string to a Direction value.
// Throws if the string is not one of the three recognized spellings.
inline Direction MakeDirection(const std::string& direction) {
  if (direction == "bidirectional") {
    return kBidirectional;
  } else if (direction == "reverse") {
    return kReverse;
  } else if (direction == "forward") {
    return kForward;
  }
  ORT_THROW("Invalid 'direction' argument of '", direction,
            "'. Must be one of 'forward', 'reverse', or 'bidirectional'.");
}
/** Allocate memory through 'allocator' and expose it as a bounds-checked span.
@param allocator IAllocator to use for the allocation.
@param size Allocation size. Number of elements of type TAlloc, or total size if TAlloc is 'void'.
@param unique_ptr unique_ptr that will own (and eventually free) the allocation.
@param fill If true, initialize the allocated memory with fill_value.
@param fill_value Value written to each element when 'fill' is true.
@returns A span providing bounds-checked access to the allocated memory.
*/
template <typename TAlloc>
gsl::span<TAlloc> Allocate(std::shared_ptr<IAllocator> allocator,
                           size_t size,
                           IAllocatorUniquePtr<TAlloc>& unique_ptr,
                           bool fill = false, TAlloc fill_value = TAlloc{}) {
  unique_ptr = IAllocator::MakeUniquePtr<TAlloc>(allocator, size);
  TAlloc* const raw = unique_ptr.get();
  if (fill) {
    // Fill via the raw pointer: iterating the span's checked iterators is
    // measurably slower and blocks compiler optimization.
    std::fill_n(raw, size, fill_value);
  }
  return gsl::make_span(raw, size);
}
// validate the common inputs to RNN, LSTM and GRU operators
Status ValidateCommonRnnInputs(const Tensor& X,
const Tensor& W,
const Tensor& R,
const Tensor* B,
int WRB_dim_1_multipler, // multiplier used with hidden_size for W, R and B inputs
const Tensor* sequence_lens,
const Tensor* initial_h,
int64_t num_directions,
int64_t hidden_size);
/// Tile the input range into the output range a fixed number of times.
/// @param input_begin Beginning of input
/// @param input_end End of input
/// @param output Output iterator. Must have room for
///        repetitions * distance(input_begin, input_end) elements.
/// @param repetitions Number of copies of the input to write.
/// @returns The output iterator advanced past the last element written.
template <typename TInIter, typename TOutIter>
TOutIter RepeatVectorToConstructArray(TInIter input_begin,
                                      TInIter input_end,
                                      TOutIter output,
                                      int64_t repetitions) {
  TOutIter out = output;
  for (int64_t rep = 0; rep < repetitions; ++rep) {
    out = std::copy(input_begin, input_end, out);
  }
  return out;
}
// Reverse an LSTM or GRU input sequence with shape
// [seq_length, batch_size, input_size], writing the result with shape
// [seq_length, num_directions, batch_size, input_size].
// For each batch entry i, the first sequence_lengths[i] time steps are written
// in reversed order; remaining (padding) steps are copied through unchanged.
template <typename T>
void ReverseSequence(gsl::span<const T> inputs,
gsl::span<T> inputs_reverse,
gsl::span<const int> sequence_lengths,
const int max_sequence_length,
const int batch_size,
const int input_size,
const int num_directions) {
for (int i = 0; i < batch_size; i++) {
int seq_len = sequence_lengths[i];
#ifdef USE_OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
// Valid time steps: source step j lands at destination step (seq_len - j - 1).
// The num_directions factor skips the interleaved slots of the other direction.
for (int j = 0; j < seq_len; j++) {
gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
gsl::span<T> dest = inputs_reverse.subspan(num_directions * (seq_len - j - 1) * batch_size * input_size + i * input_size, input_size);
// Use gsl::copy instead of std::copy() to allow compiler to optimize the code
gsl::copy(src, dest);
}
#ifdef USE_OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
// Padding steps past seq_len are copied straight across (not reversed).
for (int j = seq_len; j < max_sequence_length; j++) {
gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
gsl::span<T> dest = inputs_reverse.subspan(num_directions * j * batch_size * input_size + i * input_size, input_size);
// Use gsl::copy instead of std::copy() to allow compiler to optimize the code
gsl::copy(src, dest);
}
}
}
// Computes C = alpha * A * B' + beta * C via GemmEx.
// A has size M x K, B has size N x K (transposed), and C has size M x N.
// We check that A, B and C are large enough before calling the lower level GEMM implementation.
template <typename TSpanAIter, typename TSpanBIter, typename TSpanCIter>
void ComputeGemm(const int M,
const int N,
const int K,
const float alpha,
TSpanAIter A,
TSpanAIter A_end,
const int lda,
TSpanBIter B,
TSpanBIter B_end,
const int ldb,
const float beta,
TSpanCIter C,
TSpanCIter C_end,
const int ldc, concurrency::ThreadPool* tp) {
// validate all the inputs
// need to use the lda/ldb/ldc strides which should be >= the columns for the span
ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N);
// e.g. for A: the last row starts at (M-1)*lda and is K elements long,
// so the final accessed element is at M*lda - (lda - K) - 1.
ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end);
ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end);
ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end);
::onnxruntime::math::GemmEx<float>(
CblasNoTrans, CblasTrans,
M, N, K, alpha,
&*A, lda,
&*B, ldb, beta,
&*C, ldc, tp);
}
// Convert a span const_iterator to a raw const pointer, after validating that
// at least 'size' elements are available between 'cur' and 'end'.
template <typename T>
const T* SafeRawConstPointer(typename gsl::span<T>::const_iterator cur,
typename gsl::span<T>::const_iterator end,
size_t size) {
ORT_ENFORCE(cur + size <= end);
return &*cur;
}
// Convert a span to a raw const pointer at 'offset', after validating that
// the span covers [offset, offset + size).
template <typename T>
const T* SafeRawConstPointer(gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  // Apply the offset. The previous code returned span.data() and silently
  // ignored 'offset', which was inconsistent with the mutable
  // SafeRawPointer(span, offset, size) overload below.
  return span.data() + offset;
}
// Convert a span iterator to a raw mutable pointer, after validating that
// at least 'size' elements are available between 'cur' and 'end'.
template <typename T>
T* SafeRawPointer(typename gsl::span<T>::iterator cur,
typename gsl::span<T>::iterator end,
size_t size) {
ORT_ENFORCE(cur + size <= end);
return &*cur;
}
// Convert a span to a raw mutable pointer at 'offset', after validating that
// the span covers [offset, offset + size).
template <typename T>
T* SafeRawPointer(typename gsl::span<T> span, size_t offset, size_t size) {
ORT_ENFORCE(offset + size <= size_t(span.size()));
return span.data() + offset;
}
// Run lambda(i) for i = 0, step, 2*step, ... < max on the given thread pool,
// blocking until all invocations complete. The first exception thrown by any
// task is re-thrown on the calling thread after all tasks have finished.
// NOTE(review): 'step' is assumed to be > 0; 'max % step' below would be
// undefined for step == 0 (the loops would also never terminate) — TODO confirm
// all callers pass a positive step.
template <typename TLambda>
void ExecuteLambdaInParallel(const std::string& name, TLambda lambda, int max, int step,
onnxruntime::concurrency::ThreadPool& ttp,
const ::onnxruntime::logging::Logger& logger) {
// #define NOTHREADS to execute the lambdas directly and in order if you need to do that to debug
#ifdef NOTHREADS
ORT_UNUSED_PARAMETER(ttp);
ORT_UNUSED_PARAMETER(logger);
for (int i = 0; i < max; i += step) {
(void)name;
std::bind(lambda, i)();
}
#else
ORT_UNUSED_PARAMETER(name);
ORT_UNUSED_PARAMETER(logger);
// ORT_ENFORCE may and does throw at times from within the tasks that run
// on a thread-pool. Without propagating exceptions the process exits silently
// which will make diagnosing bugs more difficult.
// \! UGLY
// We have a problem here with the current thread-pool is that it takes std::function
// by value and copies it more than once (even though it is movable).
//
// To report status and exceptions properly it's better to use
// futures and promises but they are not copyable, so we can't come up with a functor
// with a promise member and we are downgrading to C++11 where we can't have captures that moved in.
//
// At the same time promises MUST live in the child thread so if we throw from the main thread
// we don't destroy any promises that are on the main thread stack which children threads may still be using.
//
// The only solution with the current Eigen that comes to mind is to have shared_ptr to with std::promise.
//
const int total_tasks = max / (step > 0 ? step : 1) + (max % step > 0 ? 1 : 0);
std::vector<std::future<void> > futures;
futures.reserve(total_tasks);
for (int i = 0, t = 0; i < max; i += step, ++t) {
// The shared_ptr keeps the promise alive for as long as either the task
// or this (waiting) thread needs it.
auto p_ptr = std::make_shared<std::promise<void> >();
futures.push_back(p_ptr->get_future());
ttp.Schedule([p_ptr, lambda, i]() {
try {
lambda(i);
p_ptr->set_value();
} catch (...) {
p_ptr->set_exception(std::current_exception());
}
});
}
// We'd like to wait until all of the tasks have finished
// even though one or more have already thrown. We will store
// the first exception and then will re-throw at the end.
std::exception_ptr pending_exception;
for (auto& fut : futures) {
try {
// get() will re-throw any exceptions
// the running task may throw
fut.get();
} catch (...) {
if (!pending_exception) {
pending_exception = std::current_exception();
}
}
}
if (pending_exception) {
std::rethrow_exception(pending_exception);
}
#endif
}
void DumpMatrixImpl(const std::string& name, const float* src, int row, int col,
int offset = 0, int col_width = -1);
// Helper class wrapping the processing of activation function names and their
// optional alpha/beta values. The alpha/beta values are consumed in the order
// of the activation funcs; once they run out, defaults are used as needed.
// Entries() returns the normalized function names with the alpha/beta to use.
class ActivationFuncs {
public:
// One resolved activation: normalized name plus its alpha/beta parameters.
struct Entry {
const std::string name;
const float alpha;
const float beta;
};
ActivationFuncs() = default;
// Pairs each func with an alpha/beta (defaulting when the vectors are short);
// defined in the corresponding .cc file.
ActivationFuncs(const std::vector<std::string>& funcs,
const std::vector<float>& alphas,
const std::vector<float>& betas);
const std::vector<Entry>& Entries() const {
return entries_;
}
private:
std::vector<Entry> entries_;
};
namespace deepcpu {
// Function-pointer aliases for the low-level kernels declared below; used to
// select an implementation at runtime from an activation name.
using AddBiasIntoFuncPtr = void (*)(const float*, float*, const int);
using ClipWithBiasFuncPtr = void (*)(float, const float*, float*, const int);
using ActivationFuncPtr = void (*)(float*, int, float, float);
using ActivationFuncBPtr = void (*)(const float*, float*, int, float, float);
using LstmMergeGatesFuncPtr = void (*)(const float*, float*, const float*, float*, int, float, float);
using GruResetGateFuncPtr = void (*)(const float*, float*, float*, int, float, float);
using GruOutputGateFuncPtr = void (*)(float*, const float*, const float*, float*, int, float, float);
// Kernel lookup by activation function name (definitions in the .cc file).
ActivationFuncPtr ActivationFuncByName(const std::string& func);
LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func);
GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func);
GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func);
// Bias / clipping helpers operating on length-c float buffers.
void add_bias_into_ignore(const float* ignored, const float* pd, int c);
void add_bias_into(const float* ps, float* pd, int c);
void clip(float b, float* pd, int c);
void clip_add_bias(float b, const float* pb, float* pd, int c);
void clip_ignore_bias(float b, const float* pb, float* pd, int c);
// Fused activation + merge kernels ("_m" variants combine inputs ps1/ps2).
void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void relu_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void sigmoid_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void tanh_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
// In-place element-wise activations over pd[0..c).
void sigmoid(float* pd, int c, float alpha, float beta);
void tanh(float* pd, int c, float alpha, float beta);
void relu(float* pd, int c, float alpha, float beta);
void sigmoid_exact(float* pd, int c, float alpha, float beta);
void tanh_exact(float* pd, int c, float alpha, float beta);
// LSTM / GRU gate-combination kernels.
void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float* pf, const float* pg, float* pcurr,
int c);
void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta);
void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta);
void gru_reset_gate_relu(const float* ps1, const float* ps2, float* pd, int c, float alpha, float beta);
void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);
void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);
void gru_output_gate_relu(const float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);
// Multiply-accumulate: dest[i] += op1[i] * op2[i] for i in [0, size).
// Note this accumulates into dest rather than overwriting it.
inline void elementwise_product(const float* op1, const float* op2, float* dest, int size) {
  for (int idx = 0; idx < size; ++idx) {
    dest[idx] += op1[idx] * op2[idx];
  }
}
// Accumulate: dest[i] += src[i] for i in [0, size).
inline void elementwise_sum1(const float* src, float* dest, int size) {
  const float* s = src;
  const float* const s_end = src + size;
  float* d = dest;
  while (s != s_end) {
    *d++ += *s++;
  }
}
// Accumulate the element-wise sum of two sources: dest[i] += src1[i] + src2[i].
inline void elementwise_sum2(const float* src1, const float* src2, float* dest, int size) {
  for (int idx = 0; idx < size; ++idx) {
    const float increment = src1[idx] + src2[idx];
    dest[idx] += increment;
  }
}
} // namespace deepcpu
} // namespace detail
} // namespace rnn
} // namespace onnxruntime
|
PIOpenMP.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Approximates PI with the Leibniz series
 *   pi/4 = sum_{i>=0} (-1)^i / (2i + 1)
 * parallelized with an OpenMP reduction.
 *
 * Usage: prog [num_threads] [iterations]
 */
int main(int argc, char *argv[]) {
    unsigned int processes = 8u;          /* default thread count */
    unsigned long iter = 1000000000ul;    /* default number of terms, 1E9 */
    if (argc > 1) {
        processes = (unsigned int) atoi(argv[1]);
    }
    if (argc > 2) {
        iter = (unsigned long) atol(argv[2]);
    }
    long double pi = 0.0L;
    omp_set_num_threads(processes);
#pragma omp parallel for reduction(+:pi)
    for (unsigned long i = 0ul; i < iter; ++i) {
        /* 1.0L keeps the division in long double precision; the previous
         * 1.0 literal computed each term in double before widening,
         * defeating the long double accumulator. (i << 1) + 1 == 2i + 1. */
        const long double term = 1.0L / ((i << 1) + 1);
        if (i % 2 == 0)
            pi += term;
        else
            pi -= term;
    }
    /* %u / %lu: the arguments are unsigned; the previous %d / %ld format
     * specifiers were mismatched, which is undefined behavior. */
    printf("Processes\t%u\n", processes);
    printf("Iterations\t%lu\n", iter);
    printf("Sum\t\t%.100Lf\n", pi);
    printf("PI\t\t%.100Lf\n", pi * 4.0L);
    return EXIT_SUCCESS;
}
|
GB_binop__bset_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int64)
// C=scalar+B GB (_bind1st__bset_int64)
// C=scalar+B' GB (_bind1st_tran__bset_int64)
// C=A+scalar GB (_bind2nd__bset_int64)
// C=A'+scalar GB (_bind2nd_tran__bset_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = GB_BITSET (aij, bij, int64_t, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, int64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT64 || GxB_NO_BSET_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the numeric kernel comes from
// the included template, specialized by the GB_* macros defined above.
// (Auto-generated code: regenerate from Generator/ rather than hand-editing.)
void GB (_Cdense_ewise3_noaccum__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// pre-sliced task list in B_ek_slicing. Returns GrB_NO_VALUE when this
// operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. p_bwork points to the
// scalar, passed as GB_void* and cast to the operator's int64_t type.
GrB_Info GB (_Cdense_accumb__bset_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns. Harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B for the BSET int64 operator.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// present in only one of A or B. The numeric work is in GB_add_template.c.
GrB_Info GB (_AaddB__bset_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
// unbox the eWiseUnion "missing entry" scalars for this operator's type
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is sparse/hyper,
// driven by the pre-computed task list. Kernel in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bset_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. flipxy selects fmult(y,x) vs fmult(x,y) for non-commutative
// operators that have no flipped variant (GB_BINOP_FLIP is 1 here).
GrB_Info GB (_AemultB_02__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full. Kernel in GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated eWiseMult kernel producing a bitmap C, for z = bitset (x,y) on
// int64 values.  All numeric work is performed by the included
// GB_bitmap_emult_template.c.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB (_AemultB_bitmap__bset_int64)
(
    GrB_Matrix C,                           // output matrix, bitmap
    const int ewise_method,                 // which bitmap-emult variant to use
    const GrB_Matrix M,                     // optional mask (may be NULL)
    const bool Mask_struct,                 // if true, use M structurally
    const bool Mask_comp,                   // if true, mask is complemented
    const GrB_Matrix A,                     // first input matrix
    const GrB_Matrix B,                     // second input matrix
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,                   // number of threads for C work
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For every entry k present in B (per the bitmap Bb, where a NULL Bb means
// all bnz entries are present), compute Cx [k] = bitset (x, Bx [k]), with
// the scalar x bound to the first operand.  Runs the loop across nthreads
// OpenMP threads with a static schedule.
GrB_Info GB (_bind1st__bset_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // the scalar x, as an untyped pointer
    const GB_void *Bx_input,    // values of B, as an untyped pointer
    const int8_t *restrict Bb,  // bitmap of B (NULL if B has no bitmap)
    int64_t bnz,                // number of value slots in B
    int nthreads                // number of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // view the untyped inputs as int64
    int64_t *Cz = (int64_t *) Cx_output ;
    int64_t *Bz = (int64_t *) Bx_input ;
    const int64_t xscalar = (*((int64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only act on entries present in B's bitmap
        if (GBB (Bb, k))
        { 
            const int64_t bval = GBX (Bz, k, false) ;
            Cz [k] = GB_BITSET (xscalar, bval, int64_t, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For every entry k present in A (per the bitmap Ab, where a NULL Ab means
// all anz entries are present), compute Cx [k] = bitset (Ax [k], y), with
// the scalar y bound to the second operand.  Runs the loop across nthreads
// OpenMP threads with a static schedule.
GrB_Info GB (_bind2nd__bset_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // values of A, as an untyped pointer
    const GB_void *y_input,     // the scalar y, as an untyped pointer
    const int8_t *restrict Ab,  // bitmap of A (NULL if A has no bitmap)
    int64_t anz,                // number of value slots in A
    int nthreads                // number of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // view the untyped inputs as int64
    int64_t *Cz = (int64_t *) Cx_output ;
    int64_t *Az = (int64_t *) Ax_input ;
    const int64_t yscalar = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only act on entries present in A's bitmap
        if (GBB (Ab, k))
        { 
            const int64_t aval = GBX (Az, k, false) ;
            Cz [k] = GB_BITSET (aval, yscalar, int64_t, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template below: it reads
// A's value at position pA and writes bitset (x, aij) into C at position pC.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITSET (x, aij, int64_t, 64) ; \
}
// Transposes A while applying z = bitset (x, aij) to each entry, with the
// scalar x bound as the first operand.  The transpose machinery lives in
// the included GB_unop_transpose.c template.  Returns GrB_NO_VALUE when
// compiled out via GB_DISABLE.
GrB_Info GB (_bind1st_tran__bset_int64)
(
    GrB_Matrix C,                       // output matrix
    const GB_void *x_input,             // the scalar x, as an untyped pointer
    const GrB_Matrix A,                 // input matrix, transposed on the fly
    int64_t *restrict *Workspaces,      // presumably per-task workspace — see template
    const int64_t *restrict A_slice,    // presumably how A is sliced across tasks — see template
    int nworkspaces,                    // number of workspaces
    int nthreads                        // number of threads to use
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // generated boilerplate: re-establish GB_ATYPE after this kernel
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template below: it reads
// A's value at position pA and writes bitset (aij, y) into C at position pC.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITSET (aij, y, int64_t, 64) ; \
}
// Transposes A while applying z = bitset (aij, y) to each entry, with the
// scalar y bound as the second operand.  The transpose machinery lives in
// the included GB_unop_transpose.c template.  Returns GrB_NO_VALUE when
// compiled out via GB_DISABLE.
GrB_Info GB (_bind2nd_tran__bset_int64)
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed on the fly
    const GB_void *y_input,             // the scalar y, as an untyped pointer
    int64_t *restrict *Workspaces,      // presumably per-task workspace — see template
    const int64_t *restrict A_slice,    // presumably how A is sliced across tasks — see template
    int nworkspaces,                    // number of workspaces
    int nthreads                        // number of threads to use
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.